/*
 * Copyright © 2006-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/time.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_sideband.h"

/**
 * DOC: CDCLK / RAWCLK
 *
 * The display engine uses several different clocks to do its work. There
 * are two main clocks involved that aren't directly related to the actual
 * pixel clock or any symbol/bit clock of the actual output port. These
 * are the core display clock (CDCLK) and RAWCLK.
 *
 * CDCLK clocks most of the display pipe logic, and thus its frequency
 * must be high enough to support the rate at which pixels are flowing
 * through the pipes. Downscaling must also be accounted for, as it
 * increases the effective pixel rate.
 *
 * On several platforms the CDCLK frequency can be changed dynamically
 * to minimize power consumption for a given display configuration.
 * Typically changes to the CDCLK frequency require all the display pipes
 * to be shut down while the frequency is being changed.
 *
 * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit.
 * DMC will not change the active CDCLK frequency however, so that part
 * will still be performed by the driver directly.
 *
 * RAWCLK is a fixed frequency clock, often used by various auxiliary
 * blocks such as AUX CH or backlight PWM. Hence the only thing we
 * really need to know about RAWCLK is its frequency so that various
 * dividers can be programmed correctly.
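 *
 * As a rough illustration (the exact rules are platform specific and are
 * encoded in the helpers below): a mode with a 300 MHz pixel clock needs a
 * CDCLK of at least roughly 300 MHz on platforms that process one pixel per
 * CDCLK cycle, but only roughly 150 MHz on platforms that process two
 * pixels per cycle. Downscaling a plane raises its effective pixel rate,
 * and therefore the required CDCLK, by the same factor.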
 */

static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 133333;
}

static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 200000;
}

static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 266667;
}

static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 333333;
}

static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 400000;
}

static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 450000;
}

static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (pdev->revision == 0x1) {
		cdclk_config->cdclk = 133333;
		return;
	}

	pci_bus_read_config_word(pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state. This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		cdclk_config->cdclk = 200000;
		break;
	case GC_CLOCK_166_250:
		cdclk_config->cdclk = 250000;
		break;
	case GC_CLOCK_100_133:
		cdclk_config->cdclk = 133333;
		break;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		cdclk_config->cdclk = 266667;
		break;
	}
}

static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
		cdclk_config->cdclk = 133333;
		return;
	}

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_320_MHZ:
		cdclk_config->cdclk = 333333;
		break;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		cdclk_config->cdclk = 190000;
		break;
	}
}

static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
		cdclk_config->cdclk = 133333;
		return;
	}

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_320_MHZ:
		cdclk_config->cdclk = 320000;
		break;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		cdclk_config->cdclk = 200000;
		break;
	}
}

static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	u8 tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev_priv))
		vco_table = ctg_vco;
	else if (IS_G45(dev_priv))
		vco_table = elk_vco;
	else if (IS_I965GM(dev_priv))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev_priv))
		vco_table = pnv_vco;
	else if (IS_G33(dev_priv))
		vco_table = blb_vco;
	else
		return 0;

	tmp = intel_de_read(dev_priv,
			    IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
			tmp);
	else
		drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);

	return vco;
}

static void g33_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
	static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
	static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
	static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
	const u8 *div_table;
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 4) & 0x7;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (cdclk_config->vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 4800000:
		div_table = div_4800;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
						div_table[cdclk_sel]);
	return;

fail:
	drm_err(&dev_priv->drm,
		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
		cdclk_config->vco, tmp);
	cdclk_config->cdclk = 190476;
}
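/*
 * A worked example of the lookup above, using numbers straight from the
 * tables rather than from any particular board: with the HPLL VCO read back
 * as 4000000 kHz and GCFGC selecting entry 1, div_4000[1] == 12, so
 * cdclk = DIV_ROUND_CLOSEST(4000000, 12) = 333333 kHz.
 */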
static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		cdclk_config->cdclk = 266667;
		break;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		cdclk_config->cdclk = 333333;
		break;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		cdclk_config->cdclk = 444444;
		break;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		cdclk_config->cdclk = 200000;
		break;
	default:
		drm_err(&dev_priv->drm,
			"Unknown pnv display core clock 0x%04x\n", gcfgc);
		fallthrough;
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		cdclk_config->cdclk = 133333;
		break;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		cdclk_config->cdclk = 166667;
		break;
	}
}

static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	static const u8 div_3200[] = { 16, 10, 8 };
	static const u8 div_4000[] = { 20, 12, 10 };
	static const u8 div_5333[] = { 24, 16, 14 };
	const u8 *div_table;
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (cdclk_config->vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
						div_table[cdclk_sel]);
	return;

fail:
	drm_err(&dev_priv->drm,
		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
		cdclk_config->vco, tmp);
	cdclk_config->cdclk = 200000;
}

static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (cdclk_config->vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
		break;
	case 3200000:
		cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
		break;
	default:
		drm_err(&dev_priv->drm,
			"Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
			cdclk_config->vco, tmp);
		cdclk_config->cdclk = 222222;
		break;
	}
}

static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		cdclk_config->cdclk = 800000;
	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		cdclk_config->cdclk = 450000;
	else if (IS_HSW_ULT(dev_priv))
		cdclk_config->cdclk = 337500;
	else
		cdclk_config->cdclk = 540000;
}

static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
		333333 : 320000;

	/*
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
		return 400000;
	else if (min_cdclk > 266667)
		return freq_320;
	else if (min_cdclk > 0)
		return 266667;
	else
		return 200000;
}

static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
{
	if (IS_VALLEYVIEW(dev_priv)) {
		if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
			return 2;
		else if (cdclk >= 266667)
			return 1;
		else
			return 0;
	} else {
		/*
		 * Specs are full of misinformation, but testing on actual
		 * hardware has shown that we just need to write the desired
		 * CCK divider into the Punit register.
		 */
		return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
	}
}
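/*
 * To illustrate the CHV branch above with made-up but plausible numbers
 * (the real hpll_freq is read back from the hardware at init time): with an
 * HPLL of 2666667 kHz and a target cdclk of 266667 kHz, the value returned
 * is DIV_ROUND_CLOSEST(5333334, 266667) - 1 = 20 - 1 = 19.
 */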
static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 val;

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

	cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
	cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
						CCK_DISPLAY_CLOCK_CONTROL,
						cdclk_config->vco);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

	if (IS_VALLEYVIEW(dev_priv))
		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
			DSPFREQGUAR_SHIFT;
	else
		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
			DSPFREQGUAR_SHIFT_CHV;
}

static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	intel_de_write(dev_priv, GCI_CONTROL,
		       VGA_FAST_MODE_DISABLE | default_credits);

	intel_de_write(dev_priv, GCI_CONTROL,
		       VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
}

static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	u32 val, cmd = cdclk_config->voltage_level;
	intel_wakeref_t wakeref;

	switch (cdclk) {
	case 400000:
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/* There are cases where we can end up here with power domains
	 * off and a CDCLK frequency other than the minimum, like when
	 * issuing a modeset without actually changing any display after
	 * a system suspend. So grab the display core domain, which covers
	 * the HW blocks needed for the following programming.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_CCK) |
			BIT(VLV_IOSF_SB_BUNIT) |
			BIT(VLV_IOSF_SB_PUNIT));

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		drm_err(&dev_priv->drm,
			"timed out waiting for CDclk change\n");
	}

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
					    cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			drm_err(&dev_priv->drm,
				"timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_CCK) |
			BIT(VLV_IOSF_SB_BUNIT) |
			BIT(VLV_IOSF_SB_PUNIT));

	intel_update_cdclk(dev_priv);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}

static void chv_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	u32 val, cmd = cdclk_config->voltage_level;
	intel_wakeref_t wakeref;

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/* There are cases where we can end up here with power domains
	 * off and a CDCLK frequency other than the minimum, like when
	 * issuing a modeset without actually changing any display after
	 * a system suspend. So grab the display core domain, which covers
	 * the HW blocks needed for the following programming.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);

	vlv_punit_get(dev_priv);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		drm_err(&dev_priv->drm,
			"timed out waiting for CDclk change\n");
	}

	vlv_punit_put(dev_priv);

	intel_update_cdclk(dev_priv);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}

static int bdw_calc_cdclk(int min_cdclk)
{
	if (min_cdclk > 540000)
		return 675000;
	else if (min_cdclk > 450000)
		return 540000;
	else if (min_cdclk > 337500)
		return 450000;
	else
		return 337500;
}
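/*
 * As a quick illustration of the buckets above: a min_cdclk of 400000 kHz
 * falls in the (337500, 450000] range and yields 450000 kHz, while anything
 * above 540000 kHz simply gets the 675000 kHz maximum.
 */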
static u8 bdw_calc_voltage_level(int cdclk)
{
	switch (cdclk) {
	default:
	case 337500:
		return 2;
	case 450000:
		return 0;
	case 540000:
		return 1;
	case 675000:
		return 3;
	}
}

static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		cdclk_config->cdclk = 800000;
	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		cdclk_config->cdclk = 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		cdclk_config->cdclk = 337500;
	else
		cdclk_config->cdclk = 675000;

	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		bdw_calc_voltage_level(cdclk_config->cdclk);
}

static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	u32 val;
	int ret;

	if (drm_WARN(&dev_priv->drm,
		     (intel_de_read(dev_priv, LCPLL_CTL) &
		      (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		       LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		       LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		       LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		     "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to inform pcode about cdclk change\n");
		return;
	}

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	/*
	 * According to the spec, it should be enough to poll for this 1 us.
	 * However, extensive testing shows that this can take longer.
	 */
	if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 100))
		drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		fallthrough;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		break;
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		break;
	}

	intel_de_write(dev_priv, LCPLL_CTL, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
			 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");

	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				cdclk_config->voltage_level);

	intel_de_write(dev_priv, CDCLK_FREQ,
		       DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev_priv);
}

static int skl_calc_cdclk(int min_cdclk, int vco)
{
	if (vco == 8640000) {
		if (min_cdclk > 540000)
			return 617143;
		else if (min_cdclk > 432000)
			return 540000;
		else if (min_cdclk > 308571)
			return 432000;
		else
			return 308571;
	} else {
		if (min_cdclk > 540000)
			return 675000;
		else if (min_cdclk > 450000)
			return 540000;
		else if (min_cdclk > 337500)
			return 450000;
		else
			return 337500;
	}
}

static u8 skl_calc_voltage_level(int cdclk)
{
	if (cdclk > 540000)
		return 3;
	else if (cdclk > 450000)
		return 2;
	else if (cdclk > 337500)
		return 1;
	else
		return 0;
}

static void skl_dpll0_update(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	u32 val;

	cdclk_config->ref = 24000;
	cdclk_config->vco = 0;

	val = intel_de_read(dev_priv, LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
		return;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	if (drm_WARN_ON(&dev_priv->drm,
			(val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
				DPLL_CTRL1_SSC(SKL_DPLL0) |
				DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
			DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		cdclk_config->vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		cdclk_config->vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}

static void skl_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 cdctl;

	skl_dpll0_update(dev_priv, cdclk_config);

	cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;

	if (cdclk_config->vco == 0)
		goto out;

	cdctl = intel_de_read(dev_priv, CDCLK_CTL);

	if (cdclk_config->vco == 8640000) {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			cdclk_config->cdclk = 432000;
			break;
		case CDCLK_FREQ_337_308:
			cdclk_config->cdclk = 308571;
			break;
		case CDCLK_FREQ_540:
			cdclk_config->cdclk = 540000;
			break;
		case CDCLK_FREQ_675_617:
			cdclk_config->cdclk = 617143;
			break;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
			break;
		}
	} else {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			cdclk_config->cdclk = 450000;
			break;
		case CDCLK_FREQ_337_308:
			cdclk_config->cdclk = 337500;
			break;
		case CDCLK_FREQ_540:
			cdclk_config->cdclk = 540000;
			break;
		case CDCLK_FREQ_675_617:
			cdclk_config->cdclk = 675000;
			break;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
			break;
		}
	}

out:
	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		skl_calc_voltage_level(cdclk_config->cdclk);
}

/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
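/*
 * For example, 337500 kHz becomes DIV_ROUND_CLOSEST(337500 - 1000, 500) = 673,
 * i.e. 336.5 MHz (337.5 MHz minus the 1 MHz offset) expressed in 0.5 MHz
 * units, which is what the field covered by CDCLK_FREQ_DECIMAL_MASK expects.
 */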
static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
					int vco)
{
	bool changed = dev_priv->skl_preferred_vco_freq != vco;

	dev_priv->skl_preferred_vco_freq = vco;

	if (changed)
		intel_update_max_cdclk(dev_priv);
}

static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	u32 val;

	drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	if (vco == 8640000)
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					    SKL_DPLL0);
	else
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					    SKL_DPLL0);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);

	intel_de_write(dev_priv, LCPLL1_CTL,
		       intel_de_read(dev_priv, LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "DPLL0 not locked\n");

	dev_priv->cdclk.hw.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}

static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, LCPLL1_CTL,
		       intel_de_read(dev_priv, LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");

	dev_priv->cdclk.hw.vco = 0;
}

static void skl_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int vco = cdclk_config->vco;
	u32 freq_select, cdclk_ctl;
	int ret;

	/*
	 * Per WA #1183, the 308 and 617 MHz CDCLK rates are unsupported on
	 * SKL. In theory this should never happen since only the eDP 1.4
	 * 2.16 and 4.32 Gbps rates require them, but eDP 1.4 is not supported
	 * on SKL either, see the above WA. WARN whenever trying to use the
	 * corresponding VCO freq as that always leads to using the minimum
	 * 308 MHz CDCLK.
	 */
	drm_WARN_ON_ONCE(&dev_priv->drm,
			 IS_SKYLAKE(dev_priv) && vco == 8640000);

	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Failed to inform PCU about cdclk change (%d)\n", ret);
		return;
	}

	/* Choose frequency for this cdclk */
	switch (cdclk) {
	default:
		drm_WARN_ON(&dev_priv->drm,
			    cdclk != dev_priv->cdclk.hw.bypass);
		drm_WARN_ON(&dev_priv->drm, vco != 0);
		fallthrough;
	case 308571:
	case 337500:
		freq_select = CDCLK_FREQ_337_308;
		break;
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		break;
	case 617143:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		break;
	}

	if (dev_priv->cdclk.hw.vco != 0 &&
	    dev_priv->cdclk.hw.vco != vco)
		skl_dpll0_disable(dev_priv);

	cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);

	if (dev_priv->cdclk.hw.vco != vco) {
		/* Wa Display #1183: skl,kbl,cfl */
		cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
		cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
		intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	}

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	intel_de_posting_read(dev_priv, CDCLK_CTL);

	if (dev_priv->cdclk.hw.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);

	cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	intel_de_posting_read(dev_priv, CDCLK_CTL);

	/* inform PCU of the change */
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				cdclk_config->voltage_level);

	intel_update_cdclk(dev_priv);
}

static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	/*
	 * Check if the pre-OS initialized the display: the SWF18 scratchpad
	 * register is set by the pre-OS, and the OS driver can use it to
	 * check that status.
	 */
	if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");

	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk.hw.vco == 0 ||
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * the decimal part is programmed wrong by the BIOS when the pre-OS
	 * does not enable the display. Verify that as well.
	 */
	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk.hw.cdclk = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk.hw.vco = -1;
}

static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk.hw.cdclk != 0 &&
	    dev_priv->cdclk.hw.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk.hw.vco);
		return;
	}

	cdclk_config = dev_priv->cdclk.hw;

	cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
	if (cdclk_config.vco == 0)
		cdclk_config.vco = 8100000;
	cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);

	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;

	cdclk_config.cdclk = cdclk_config.bypass;
	cdclk_config.vco = 0;
	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);

	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

static const struct intel_cdclk_vals bxt_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
	{}
};

static const struct intel_cdclk_vals glk_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
	{ .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
	{ .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
	{}
};

static const struct intel_cdclk_vals cnl_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 },
	{ .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 },
	{ .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 },

	{ .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 },
	{ .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 },
	{ .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 },
	{}
};

static const struct intel_cdclk_vals icl_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
	{ .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },

	{ .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
	{ .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

	{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};

static const struct intel_cdclk_vals rkl_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
	{ .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },

	{ .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
	{ .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },

	{ .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
	{ .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
	{}
};

static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
	const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
	int i;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == dev_priv->cdclk.hw.ref &&
		    table[i].cdclk >= min_cdclk)
			return table[i].cdclk;

	drm_WARN(&dev_priv->drm, 1,
		 "Cannot satisfy minimum cdclk %d with refclk %u\n",
		 min_cdclk, dev_priv->cdclk.hw.ref);
	return 0;
}

static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
	int i;

	if (cdclk == dev_priv->cdclk.hw.bypass)
		return 0;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == dev_priv->cdclk.hw.ref &&
		    table[i].cdclk == cdclk)
			return dev_priv->cdclk.hw.ref * table[i].ratio;

	drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
		 cdclk, dev_priv->cdclk.hw.ref);
	return 0;
}
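/*
 * The tables above encode the relationship cdclk = (refclk * ratio) / divider,
 * where refclk * ratio is the CDCLK PLL VCO frequency returned by
 * bxt_calc_cdclk_pll_vco(). For example, the 307200 kHz entry for a 38400 kHz
 * refclk on ICL uses ratio 16 and divider 2:
 * vco = 38400 * 16 = 614400 kHz, cdclk = 614400 / 2 = 307200 kHz.
 */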
static u8 bxt_calc_voltage_level(int cdclk)
{
	return DIV_ROUND_UP(cdclk, 25000);
}

static u8 cnl_calc_voltage_level(int cdclk)
{
	if (cdclk > 336000)
		return 2;
	else if (cdclk > 168000)
		return 1;
	else
		return 0;
}

static u8 icl_calc_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

static u8 ehl_calc_voltage_level(int cdclk)
{
	if (cdclk > 326400)
		return 3;
	else if (cdclk > 312000)
		return 2;
	else if (cdclk > 180000)
		return 1;
	else
		return 0;
}

static u8 tgl_calc_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 3;
	else if (cdclk > 326400)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
			       struct intel_cdclk_config *cdclk_config)
{
	if (intel_de_read(dev_priv, SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
		cdclk_config->ref = 24000;
	else
		cdclk_config->ref = 19200;
}

static void icl_readout_refclk(struct drm_i915_private *dev_priv,
			       struct intel_cdclk_config *cdclk_config)
{
	u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;

	switch (dssm) {
	default:
		MISSING_CASE(dssm);
		fallthrough;
	case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
		cdclk_config->ref = 24000;
		break;
	case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
		cdclk_config->ref = 19200;
		break;
	case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
		cdclk_config->ref = 38400;
		break;
	}
}

static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
			       struct intel_cdclk_config *cdclk_config)
{
	u32 val, ratio;

	if (INTEL_GEN(dev_priv) >= 11)
		icl_readout_refclk(dev_priv, cdclk_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_readout_refclk(dev_priv, cdclk_config);
	else
		cdclk_config->ref = 19200;

	val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
	    (val & BXT_DE_PLL_LOCK) == 0) {
		/*
		 * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
		 * setting it to zero is a way to signal that.
		 */
		cdclk_config->vco = 0;
		return;
	}

	/*
	 * CNL+ have the ratio directly in the PLL enable register, gen9lp had
	 * it in a separate PLL control register.
	 */
	if (INTEL_GEN(dev_priv) >= 10)
		ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
	else
		ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;

	cdclk_config->vco = ratio * cdclk_config->ref;
}

static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 divider;
	int div;

	bxt_de_pll_readout(dev_priv, cdclk_config);

	if (INTEL_GEN(dev_priv) >= 12)
		cdclk_config->bypass = cdclk_config->ref / 2;
	else if (INTEL_GEN(dev_priv) >= 11)
		cdclk_config->bypass = 50000;
	else
		cdclk_config->bypass = cdclk_config->ref;

	if (cdclk_config->vco == 0) {
		cdclk_config->cdclk = cdclk_config->bypass;
		goto out;
	}

	divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;

	switch (divider) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		div = 2;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		drm_WARN(&dev_priv->drm,
			 IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
			 "Unsupported divider\n");
		div = 3;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		div = 4;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
			 "Unsupported divider\n");
		div = 8;
		break;
	default:
		MISSING_CASE(divider);
		return;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);

out:
	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		dev_priv->display.calc_voltage_level(cdclk_config->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_de_wait_for_clear(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk.hw.vco = 0;
}

static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
	u32 val;

	val = intel_de_read(dev_priv, BXT_DE_PLL_CTL);
	val &= ~BXT_DE_PLL_RATIO_MASK;
	val |= BXT_DE_PLL_RATIO(ratio);
	intel_de_write(dev_priv, BXT_DE_PLL_CTL, val);

	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_de_wait_for_set(dev_priv,
				  BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");

	dev_priv->cdclk.hw.vco = vco;
}

static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
	val &= ~BXT_DE_PLL_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Timeout 200us */
	if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
		drm_err(&dev_priv->drm,
			"timeout waiting for CDCLK PLL unlock\n");

	dev_priv->cdclk.hw.vco = 0;
}

static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
	u32 val;

	val = CNL_CDCLK_PLL_RATIO(ratio);
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	val |= BXT_DE_PLL_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Timeout 200us */
	if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
		drm_err(&dev_priv->drm,
			"timeout waiting for CDCLK PLL lock\n");

	dev_priv->cdclk.hw.vco = vco;
}

static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		if (pipe == INVALID_PIPE)
			return TGL_CDCLK_CD2X_PIPE_NONE;
		else
			return TGL_CDCLK_CD2X_PIPE(pipe);
	} else if (INTEL_GEN(dev_priv) >= 11) {
		if (pipe == INVALID_PIPE)
			return ICL_CDCLK_CD2X_PIPE_NONE;
		else
			return ICL_CDCLK_CD2X_PIPE(pipe);
	} else {
		if (pipe == INVALID_PIPE)
			return BXT_CDCLK_CD2X_PIPE_NONE;
		else
			return BXT_CDCLK_CD2X_PIPE(pipe);
	}
}

static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int vco = cdclk_config->vco;
	u32 val, divider;
	int ret;

	/* Inform power controller of upcoming frequency change. */
	if (INTEL_GEN(dev_priv) >= 10)
		ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
					SKL_CDCLK_PREPARE_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE, 3);
	else
		/*
		 * BSpec requires us to wait up to 150usec, but that leads to
		 * timeouts; the 2ms used here is based on experiment.
		 */
		ret = sandybridge_pcode_write_timeout(dev_priv,
						      HSW_PCODE_DE_WRITE_FREQ_REQ,
						      0x80000000, 150, 2);

	if (ret) {
		drm_err(&dev_priv->drm,
			"Failed to inform PCU about cdclk change (err %d, freq %d)\n",
			ret, cdclk);
		return;
	}

	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	default:
		drm_WARN_ON(&dev_priv->drm,
			    cdclk != dev_priv->cdclk.hw.bypass);
		drm_WARN_ON(&dev_priv->drm, vco != 0);
		fallthrough;
	case 2:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	case 3:
		drm_WARN(&dev_priv->drm,
			 IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
			 "Unsupported divider\n");
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
		break;
	case 4:
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
		break;
	case 8:
		drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
			 "Unsupported divider\n");
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10) {
		if (dev_priv->cdclk.hw.vco != 0 &&
		    dev_priv->cdclk.hw.vco != vco)
			cnl_cdclk_pll_disable(dev_priv);

		if (dev_priv->cdclk.hw.vco != vco)
			cnl_cdclk_pll_enable(dev_priv, vco);

	} else {
		if (dev_priv->cdclk.hw.vco != 0 &&
		    dev_priv->cdclk.hw.vco != vco)
			bxt_de_pll_disable(dev_priv);

		if (dev_priv->cdclk.hw.vco != vco)
			bxt_de_pll_enable(dev_priv, vco);
	}

	val = divider | skl_cdclk_decimal(cdclk) |
		bxt_cdclk_cd2x_pipe(dev_priv, pipe);

	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	intel_de_write(dev_priv, CDCLK_CTL, val);

	if (pipe != INVALID_PIPE)
		intel_wait_for_vblank(dev_priv, pipe);

	if (INTEL_GEN(dev_priv) >= 10) {
		ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
					      cdclk_config->voltage_level);
	} else {
		/*
		 * The timeout isn't specified, the 2ms used here is based on
		 * experiment.
		 * FIXME: Waiting for the request completion could be delayed
		 * until the next PCODE request based on BSpec.
		 */
		ret = sandybridge_pcode_write_timeout(dev_priv,
						      HSW_PCODE_DE_WRITE_FREQ_REQ,
						      cdclk_config->voltage_level,
						      150, 2);
	}

	if (ret) {
		drm_err(&dev_priv->drm,
			"PCode CDCLK freq set failed, (err %d, freq %d)\n",
			ret, cdclk);
		return;
	}

	intel_update_cdclk(dev_priv);

	if (INTEL_GEN(dev_priv) >= 10)
		/*
		 * Can't read out the voltage level :(
		 * Let's just assume everything is as expected.
		 */
		dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
}

static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;
	int cdclk, vco;

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");

	if (dev_priv->cdclk.hw.vco == 0 ||
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);

	/* Make sure this is a legal cdclk value for the platform */
	cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
	if (cdclk != dev_priv->cdclk.hw.cdclk)
		goto sanitize;

	/* Make sure the VCO is correct for the cdclk */
	vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
	if (vco != dev_priv->cdclk.hw.vco)
		goto sanitize;

	expected = skl_cdclk_decimal(cdclk);

	/* Figure out what CD2X divider we should be using for this cdclk */
	switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco,
				  dev_priv->cdclk.hw.cdclk)) {
	case 2:
		expected |= BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	case 3:
		expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5;
		break;
	case 4:
		expected |= BXT_CDCLK_CD2X_DIV_SEL_2;
		break;
	case 8:
		expected |= BXT_CDCLK_CD2X_DIV_SEL_4;
		break;
	default:
		goto sanitize;
	}

	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000)
		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk.hw.cdclk = 0;

	/* force full PLL disable + enable */
	dev_priv->cdclk.hw.vco = -1;
}
1732 */ 1733 if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000) 1734 expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; 1735 1736 if (cdctl == expected) 1737 /* All well; nothing to sanitize */ 1738 return; 1739 1740 sanitize: 1741 drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); 1742 1743 /* force cdclk programming */ 1744 dev_priv->cdclk.hw.cdclk = 0; 1745 1746 /* force full PLL disable + enable */ 1747 dev_priv->cdclk.hw.vco = -1; 1748 } 1749 1750 static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) 1751 { 1752 struct intel_cdclk_config cdclk_config; 1753 1754 bxt_sanitize_cdclk(dev_priv); 1755 1756 if (dev_priv->cdclk.hw.cdclk != 0 && 1757 dev_priv->cdclk.hw.vco != 0) 1758 return; 1759 1760 cdclk_config = dev_priv->cdclk.hw; 1761 1762 /* 1763 * FIXME: 1764 * - The initial CDCLK needs to be read from VBT. 1765 * Need to make this change after VBT has changes for BXT. 1766 */ 1767 cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0); 1768 cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk); 1769 cdclk_config.voltage_level = 1770 dev_priv->display.calc_voltage_level(cdclk_config.cdclk); 1771 1772 bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); 1773 } 1774 1775 static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) 1776 { 1777 struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw; 1778 1779 cdclk_config.cdclk = cdclk_config.bypass; 1780 cdclk_config.vco = 0; 1781 cdclk_config.voltage_level = 1782 dev_priv->display.calc_voltage_level(cdclk_config.cdclk); 1783 1784 bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); 1785 } 1786 1787 /** 1788 * intel_cdclk_init_hw - Initialize CDCLK hardware 1789 * @i915: i915 device 1790 * 1791 * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and 1792 * sanitizing the state of the hardware if needed. This is generally done only 1793 * during the display core initialization sequence, after which the DMC will 1794 * take care of turning CDCLK off/on as needed. 1795 */ 1796 void intel_cdclk_init_hw(struct drm_i915_private *i915) 1797 { 1798 if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) 1799 bxt_cdclk_init_hw(i915); 1800 else if (IS_GEN9_BC(i915)) 1801 skl_cdclk_init_hw(i915); 1802 } 1803 1804 /** 1805 * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware 1806 * @i915: i915 device 1807 * 1808 * Uninitialize CDCLK. This is done only during the display core 1809 * uninitialization sequence. 1810 */ 1811 void intel_cdclk_uninit_hw(struct drm_i915_private *i915) 1812 { 1813 if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915)) 1814 bxt_cdclk_uninit_hw(i915); 1815 else if (IS_GEN9_BC(i915)) 1816 skl_cdclk_uninit_hw(i915); 1817 } 1818 1819 /** 1820 * intel_cdclk_needs_modeset - Determine if changong between the CDCLK 1821 * configurations requires a modeset on all pipes 1822 * @a: first CDCLK configuration 1823 * @b: second CDCLK configuration 1824 * 1825 * Returns: 1826 * True if changing between the two CDCLK configurations 1827 * requires all pipes to be off, false if not. 
1828 */ 1829 bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, 1830 const struct intel_cdclk_config *b) 1831 { 1832 return a->cdclk != b->cdclk || 1833 a->vco != b->vco || 1834 a->ref != b->ref; 1835 } 1836 1837 /** 1838 * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK 1839 * configurations requires only a cd2x divider update 1840 * @dev_priv: i915 device 1841 * @a: first CDCLK configuration 1842 * @b: second CDCLK configuration 1843 * 1844 * Returns: 1845 * True if changing between the two CDCLK configurations 1846 * can be done with just a cd2x divider update, false if not. 1847 */ 1848 static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, 1849 const struct intel_cdclk_config *a, 1850 const struct intel_cdclk_config *b) 1851 { 1852 /* Older hw doesn't have the capability */ 1853 if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv)) 1854 return false; 1855 1856 return a->cdclk != b->cdclk && 1857 a->vco == b->vco && 1858 a->ref == b->ref; 1859 } 1860 1861 /** 1862 * intel_cdclk_changed - Determine if two CDCLK configurations are different 1863 * @a: first CDCLK configuration 1864 * @b: second CDCLK configuration 1865 * 1866 * Returns: 1867 * True if the CDCLK configurations don't match, false if they do. 1868 */ 1869 static bool intel_cdclk_changed(const struct intel_cdclk_config *a, 1870 const struct intel_cdclk_config *b) 1871 { 1872 return intel_cdclk_needs_modeset(a, b) || 1873 a->voltage_level != b->voltage_level; 1874 } 1875 1876 void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, 1877 const char *context) 1878 { 1879 DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", 1880 context, cdclk_config->cdclk, cdclk_config->vco, 1881 cdclk_config->ref, cdclk_config->bypass, 1882 cdclk_config->voltage_level); 1883 } 1884 1885 /** 1886 * intel_set_cdclk - Push the CDCLK configuration to the hardware 1887 * @dev_priv: i915 device 1888 * @cdclk_config: new CDCLK configuration 1889 * @pipe: pipe with which to synchronize the update 1890 * 1891 * Program the hardware based on the passed in CDCLK state, 1892 * if necessary. 1893 */ 1894 static void intel_set_cdclk(struct drm_i915_private *dev_priv, 1895 const struct intel_cdclk_config *cdclk_config, 1896 enum pipe pipe) 1897 { 1898 struct intel_encoder *encoder; 1899 1900 if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config)) 1901 return; 1902 1903 if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk)) 1904 return; 1905 1906 intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to"); 1907 1908 /* 1909 * Lock aux/gmbus while we change cdclk in case those 1910 * functions use cdclk. Not all platforms/ports do, 1911 * but we'll lock them all for simplicity. 
1912 */ 1913 mutex_lock(&dev_priv->gmbus_mutex); 1914 for_each_intel_dp(&dev_priv->drm, encoder) { 1915 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1916 1917 mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, 1918 &dev_priv->gmbus_mutex); 1919 } 1920 1921 dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe); 1922 1923 for_each_intel_dp(&dev_priv->drm, encoder) { 1924 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1925 1926 mutex_unlock(&intel_dp->aux.hw_mutex); 1927 } 1928 mutex_unlock(&dev_priv->gmbus_mutex); 1929 1930 if (drm_WARN(&dev_priv->drm, 1931 intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), 1932 "cdclk state doesn't match!\n")) { 1933 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]"); 1934 intel_dump_cdclk_config(cdclk_config, "[sw state]"); 1935 } 1936 } 1937 1938 /** 1939 * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware 1940 * @state: intel atomic state 1941 * 1942 * Program the hardware before updating the HW plane state based on the 1943 * new CDCLK state, if necessary. 1944 */ 1945 void 1946 intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) 1947 { 1948 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 1949 const struct intel_cdclk_state *old_cdclk_state = 1950 intel_atomic_get_old_cdclk_state(state); 1951 const struct intel_cdclk_state *new_cdclk_state = 1952 intel_atomic_get_new_cdclk_state(state); 1953 enum pipe pipe = new_cdclk_state->pipe; 1954 1955 if (!intel_cdclk_changed(&old_cdclk_state->actual, 1956 &new_cdclk_state->actual)) 1957 return; 1958 1959 if (pipe == INVALID_PIPE || 1960 old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) { 1961 drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); 1962 1963 intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); 1964 } 1965 } 1966 1967 /** 1968 * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware 1969 * @state: intel atomic state 1970 * 1971 * Program the hardware after updating the HW plane state based on the 1972 * new CDCLK state, if necessary. 
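 *
 * This is where the CDCLK gets lowered when the new configuration calls
 * for a lower frequency; raising it (or updating it when no pipe is
 * given) is handled by intel_set_cdclk_pre_plane_update().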
1973 */ 1974 void 1975 intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) 1976 { 1977 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 1978 const struct intel_cdclk_state *old_cdclk_state = 1979 intel_atomic_get_old_cdclk_state(state); 1980 const struct intel_cdclk_state *new_cdclk_state = 1981 intel_atomic_get_new_cdclk_state(state); 1982 enum pipe pipe = new_cdclk_state->pipe; 1983 1984 if (!intel_cdclk_changed(&old_cdclk_state->actual, 1985 &new_cdclk_state->actual)) 1986 return; 1987 1988 if (pipe != INVALID_PIPE && 1989 old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) { 1990 drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); 1991 1992 intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); 1993 } 1994 } 1995 1996 static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) 1997 { 1998 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1999 int pixel_rate = crtc_state->pixel_rate; 2000 2001 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 2002 return DIV_ROUND_UP(pixel_rate, 2); 2003 else if (IS_GEN(dev_priv, 9) || 2004 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2005 return pixel_rate; 2006 else if (IS_CHERRYVIEW(dev_priv)) 2007 return DIV_ROUND_UP(pixel_rate * 100, 95); 2008 else if (crtc_state->double_wide) 2009 return DIV_ROUND_UP(pixel_rate * 100, 90 * 2); 2010 else 2011 return DIV_ROUND_UP(pixel_rate * 100, 90); 2012 } 2013 2014 static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) 2015 { 2016 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2017 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2018 struct intel_plane *plane; 2019 int min_cdclk = 0; 2020 2021 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) 2022 min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk); 2023 2024 return min_cdclk; 2025 } 2026 2027 int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) 2028 { 2029 struct drm_i915_private *dev_priv = 2030 to_i915(crtc_state->uapi.crtc->dev); 2031 int min_cdclk; 2032 2033 if (!crtc_state->hw.enable) 2034 return 0; 2035 2036 min_cdclk = intel_pixel_rate_to_cdclk(crtc_state); 2037 2038 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 2039 if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state)) 2040 min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95); 2041 2042 /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz, 2043 * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else 2044 * there may be audio corruption or screen corruption." This cdclk 2045 * restriction for GLK is 316.8 MHz. 2046 */ 2047 if (intel_crtc_has_dp_encoder(crtc_state) && 2048 crtc_state->has_audio && 2049 crtc_state->port_clock >= 540000 && 2050 crtc_state->lane_count == 4) { 2051 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { 2052 /* Display WA #1145: glk,cnl */ 2053 min_cdclk = max(316800, min_cdclk); 2054 } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) { 2055 /* Display WA #1144: skl,bxt */ 2056 min_cdclk = max(432000, min_cdclk); 2057 } 2058 } 2059 2060 /* 2061 * According to BSpec, "The CD clock frequency must be at least twice 2062 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. 
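 * For example, with the default 96 MHz BCLK this works out to a minimum
 * CDCLK of 2 * 96000 kHz = 192 MHz, which is the clamp applied below.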
2063 */ 2064 if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) 2065 min_cdclk = max(2 * 96000, min_cdclk); 2066 2067 /* 2068 * "For DP audio configuration, cdclk frequency shall be set to 2069 * meet the following requirements: 2070 * DP Link Frequency(MHz) | Cdclk frequency(MHz) 2071 * 270 | 320 or higher 2072 * 162 | 200 or higher" 2073 */ 2074 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 2075 intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) 2076 min_cdclk = max(crtc_state->port_clock, min_cdclk); 2077 2078 /* 2079 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower 2080 * than 320000 kHz. 2081 */ 2082 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) && 2083 IS_VALLEYVIEW(dev_priv)) 2084 min_cdclk = max(320000, min_cdclk); 2085 2086 /* 2087 * On Geminilake once the CDCLK gets as low as 79200 kHz the 2088 * picture becomes unstable, even though the DSI PLL and DE PLL 2089 * values are correct. 2090 */ 2091 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) && 2092 IS_GEMINILAKE(dev_priv)) 2093 min_cdclk = max(158400, min_cdclk); 2094 2095 /* Account for additional needs from the planes */ 2096 min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk); 2097 2098 /* 2099 * HACK. Currently for TGL platforms we calculate 2100 * min_cdclk initially based on pixel_rate divided 2101 * by 2, also accounting for the plane requirements. 2102 * However, in some cases the lowest possible CDCLK 2103 * doesn't work and causes underruns. 2104 * To be clear, this is currently a workaround 2105 * rather than a final solution. 2106 */ 2107 if (IS_TIGERLAKE(dev_priv)) { 2108 /* 2109 * Clamp to max_cdclk_freq in case the pixel rate is higher, 2110 * so as not to break 8K modes, while keeping the W/A in place. 2111 */ 2112 min_cdclk = max_t(int, min_cdclk, 2113 min_t(int, crtc_state->pixel_rate, 2114 dev_priv->max_cdclk_freq)); 2115 } 2116 2117 if (min_cdclk > dev_priv->max_cdclk_freq) { 2118 drm_dbg_kms(&dev_priv->drm, 2119 "required cdclk (%d kHz) exceeds max (%d kHz)\n", 2120 min_cdclk, dev_priv->max_cdclk_freq); 2121 return -EINVAL; 2122 } 2123 2124 return min_cdclk; 2125 } 2126 2127 static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) 2128 { 2129 struct intel_atomic_state *state = cdclk_state->base.state; 2130 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2131 struct intel_bw_state *bw_state = NULL; 2132 struct intel_crtc *crtc; 2133 struct intel_crtc_state *crtc_state; 2134 int min_cdclk, i; 2135 enum pipe pipe; 2136 2137 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 2138 int ret; 2139 2140 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); 2141 if (min_cdclk < 0) 2142 return min_cdclk; 2143 2144 bw_state = intel_atomic_get_bw_state(state); 2145 if (IS_ERR(bw_state)) 2146 return PTR_ERR(bw_state); 2147 2148 if (cdclk_state->min_cdclk[i] == min_cdclk) 2149 continue; 2150 2151 cdclk_state->min_cdclk[i] = min_cdclk; 2152 2153 ret = intel_atomic_lock_global_state(&cdclk_state->base); 2154 if (ret) 2155 return ret; 2156 } 2157 2158 min_cdclk = cdclk_state->force_min_cdclk; 2159 for_each_pipe(dev_priv, pipe) { 2160 min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); 2161 2162 if (!bw_state) 2163 continue; 2164 2165 min_cdclk = max(bw_state->min_cdclk, min_cdclk); 2166 } 2167 2168 return min_cdclk; 2169 } 2170 2171 /* 2172 * Account for port clock min voltage level requirements. 2173 * This only really does something on CNL+ but can be 2174 * called on earlier platforms as well.
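 * (On those earlier platforms crtc_state->min_voltage_level is expected
 * to remain zero, so the computed maximum is simply zero as well.)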
2175 * 2176 * Note that this function assumes that 0 is 2177 * the lowest voltage value, and higher values 2178 * correspond to increasingly higher voltages. 2179 * 2180 * Should that relationship no longer hold on 2181 * future platforms this code will need to be 2182 * adjusted. 2183 */ 2184 static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state) 2185 { 2186 struct intel_atomic_state *state = cdclk_state->base.state; 2187 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2188 struct intel_crtc *crtc; 2189 struct intel_crtc_state *crtc_state; 2190 u8 min_voltage_level; 2191 int i; 2192 enum pipe pipe; 2193 2194 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 2195 int ret; 2196 2197 if (crtc_state->hw.enable) 2198 min_voltage_level = crtc_state->min_voltage_level; 2199 else 2200 min_voltage_level = 0; 2201 2202 if (cdclk_state->min_voltage_level[i] == min_voltage_level) 2203 continue; 2204 2205 cdclk_state->min_voltage_level[i] = min_voltage_level; 2206 2207 ret = intel_atomic_lock_global_state(&cdclk_state->base); 2208 if (ret) 2209 return ret; 2210 } 2211 2212 min_voltage_level = 0; 2213 for_each_pipe(dev_priv, pipe) 2214 min_voltage_level = max(cdclk_state->min_voltage_level[pipe], 2215 min_voltage_level); 2216 2217 return min_voltage_level; 2218 } 2219 2220 static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2221 { 2222 struct intel_atomic_state *state = cdclk_state->base.state; 2223 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2224 int min_cdclk, cdclk; 2225 2226 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2227 if (min_cdclk < 0) 2228 return min_cdclk; 2229 2230 cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); 2231 2232 cdclk_state->logical.cdclk = cdclk; 2233 cdclk_state->logical.voltage_level = 2234 vlv_calc_voltage_level(dev_priv, cdclk); 2235 2236 if (!cdclk_state->active_pipes) { 2237 cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); 2238 2239 cdclk_state->actual.cdclk = cdclk; 2240 cdclk_state->actual.voltage_level = 2241 vlv_calc_voltage_level(dev_priv, cdclk); 2242 } else { 2243 cdclk_state->actual = cdclk_state->logical; 2244 } 2245 2246 return 0; 2247 } 2248 2249 static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2250 { 2251 int min_cdclk, cdclk; 2252 2253 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2254 if (min_cdclk < 0) 2255 return min_cdclk; 2256 2257 /* 2258 * FIXME should also account for plane ratio 2259 * once 64bpp pixel formats are supported.
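 *
 * (bdw_calc_cdclk() rounds min_cdclk up to one of the fixed 337.5, 450,
 * 540 or 675 MHz steps supported by the platform.)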
2260 */ 2261 cdclk = bdw_calc_cdclk(min_cdclk); 2262 2263 cdclk_state->logical.cdclk = cdclk; 2264 cdclk_state->logical.voltage_level = 2265 bdw_calc_voltage_level(cdclk); 2266 2267 if (!cdclk_state->active_pipes) { 2268 cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk); 2269 2270 cdclk_state->actual.cdclk = cdclk; 2271 cdclk_state->actual.voltage_level = 2272 bdw_calc_voltage_level(cdclk); 2273 } else { 2274 cdclk_state->actual = cdclk_state->logical; 2275 } 2276 2277 return 0; 2278 } 2279 2280 static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state) 2281 { 2282 struct intel_atomic_state *state = cdclk_state->base.state; 2283 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2284 struct intel_crtc *crtc; 2285 struct intel_crtc_state *crtc_state; 2286 int vco, i; 2287 2288 vco = cdclk_state->logical.vco; 2289 if (!vco) 2290 vco = dev_priv->skl_preferred_vco_freq; 2291 2292 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 2293 if (!crtc_state->hw.enable) 2294 continue; 2295 2296 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) 2297 continue; 2298 2299 /* 2300 * DPLL0 VCO may need to be adjusted to get the correct 2301 * clock for eDP. This will affect cdclk as well. 2302 */ 2303 switch (crtc_state->port_clock / 2) { 2304 case 108000: 2305 case 216000: 2306 vco = 8640000; 2307 break; 2308 default: 2309 vco = 8100000; 2310 break; 2311 } 2312 } 2313 2314 return vco; 2315 } 2316 2317 static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2318 { 2319 int min_cdclk, cdclk, vco; 2320 2321 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2322 if (min_cdclk < 0) 2323 return min_cdclk; 2324 2325 vco = skl_dpll0_vco(cdclk_state); 2326 2327 /* 2328 * FIXME should also account for plane ratio 2329 * once 64bpp pixel formats are supported. 
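 *
 * (skl_calc_cdclk() picks one of the fixed frequencies the chosen DPLL0
 * VCO can produce: 308571, 432000, 540000 or 617143 kHz with the 8640 MHz
 * VCO, and 337500, 450000, 540000 or 675000 kHz with the 8100 MHz VCO.)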
2330 */ 2331 cdclk = skl_calc_cdclk(min_cdclk, vco); 2332 2333 cdclk_state->logical.vco = vco; 2334 cdclk_state->logical.cdclk = cdclk; 2335 cdclk_state->logical.voltage_level = 2336 skl_calc_voltage_level(cdclk); 2337 2338 if (!cdclk_state->active_pipes) { 2339 cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco); 2340 2341 cdclk_state->actual.vco = vco; 2342 cdclk_state->actual.cdclk = cdclk; 2343 cdclk_state->actual.voltage_level = 2344 skl_calc_voltage_level(cdclk); 2345 } else { 2346 cdclk_state->actual = cdclk_state->logical; 2347 } 2348 2349 return 0; 2350 } 2351 2352 static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2353 { 2354 struct intel_atomic_state *state = cdclk_state->base.state; 2355 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2356 int min_cdclk, min_voltage_level, cdclk, vco; 2357 2358 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2359 if (min_cdclk < 0) 2360 return min_cdclk; 2361 2362 min_voltage_level = bxt_compute_min_voltage_level(cdclk_state); 2363 if (min_voltage_level < 0) 2364 return min_voltage_level; 2365 2366 cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); 2367 vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); 2368 2369 cdclk_state->logical.vco = vco; 2370 cdclk_state->logical.cdclk = cdclk; 2371 cdclk_state->logical.voltage_level = 2372 max_t(int, min_voltage_level, 2373 dev_priv->display.calc_voltage_level(cdclk)); 2374 2375 if (!cdclk_state->active_pipes) { 2376 cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); 2377 vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); 2378 2379 cdclk_state->actual.vco = vco; 2380 cdclk_state->actual.cdclk = cdclk; 2381 cdclk_state->actual.voltage_level = 2382 dev_priv->display.calc_voltage_level(cdclk); 2383 } else { 2384 cdclk_state->actual = cdclk_state->logical; 2385 } 2386 2387 return 0; 2388 } 2389 2390 static int intel_modeset_all_pipes(struct intel_atomic_state *state) 2391 { 2392 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2393 struct intel_crtc *crtc; 2394 2395 /* 2396 * Add all pipes to the state, and force 2397 * a modeset on all the active ones. 2398 */ 2399 for_each_intel_crtc(&dev_priv->drm, crtc) { 2400 struct intel_crtc_state *crtc_state; 2401 int ret; 2402 2403 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 2404 if (IS_ERR(crtc_state)) 2405 return PTR_ERR(crtc_state); 2406 2407 if (!crtc_state->hw.active || 2408 drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) 2409 continue; 2410 2411 crtc_state->uapi.mode_changed = true; 2412 2413 ret = drm_atomic_add_affected_connectors(&state->base, 2414 &crtc->base); 2415 if (ret) 2416 return ret; 2417 2418 ret = intel_atomic_add_affected_planes(state, crtc); 2419 if (ret) 2420 return ret; 2421 2422 crtc_state->update_planes |= crtc_state->active_planes; 2423 } 2424 2425 return 0; 2426 } 2427 2428 static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2429 { 2430 int min_cdclk; 2431 2432 /* 2433 * We can't change the cdclk frequency, but we still want to 2434 * check that the required minimum frequency doesn't exceed 2435 * the actual cdclk frequency. 
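 * intel_compute_min_cdclk() already performs that check for us and
 * returns a negative error code if the limit is exceeded.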
2436 */ 2437 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2438 if (min_cdclk < 0) 2439 return min_cdclk; 2440 2441 return 0; 2442 } 2443 2444 static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj) 2445 { 2446 struct intel_cdclk_state *cdclk_state; 2447 2448 cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL); 2449 if (!cdclk_state) 2450 return NULL; 2451 2452 cdclk_state->pipe = INVALID_PIPE; 2453 2454 return &cdclk_state->base; 2455 } 2456 2457 static void intel_cdclk_destroy_state(struct intel_global_obj *obj, 2458 struct intel_global_state *state) 2459 { 2460 kfree(state); 2461 } 2462 2463 static const struct intel_global_state_funcs intel_cdclk_funcs = { 2464 .atomic_duplicate_state = intel_cdclk_duplicate_state, 2465 .atomic_destroy_state = intel_cdclk_destroy_state, 2466 }; 2467 2468 struct intel_cdclk_state * 2469 intel_atomic_get_cdclk_state(struct intel_atomic_state *state) 2470 { 2471 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2472 struct intel_global_state *cdclk_state; 2473 2474 cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj); 2475 if (IS_ERR(cdclk_state)) 2476 return ERR_CAST(cdclk_state); 2477 2478 return to_intel_cdclk_state(cdclk_state); 2479 } 2480 2481 int intel_cdclk_init(struct drm_i915_private *dev_priv) 2482 { 2483 struct intel_cdclk_state *cdclk_state; 2484 2485 cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL); 2486 if (!cdclk_state) 2487 return -ENOMEM; 2488 2489 intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj, 2490 &cdclk_state->base, &intel_cdclk_funcs); 2491 2492 return 0; 2493 } 2494 2495 int intel_modeset_calc_cdclk(struct intel_atomic_state *state) 2496 { 2497 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2498 const struct intel_cdclk_state *old_cdclk_state; 2499 struct intel_cdclk_state *new_cdclk_state; 2500 enum pipe pipe; 2501 int ret; 2502 2503 new_cdclk_state = intel_atomic_get_cdclk_state(state); 2504 if (IS_ERR(new_cdclk_state)) 2505 return PTR_ERR(new_cdclk_state); 2506 2507 old_cdclk_state = intel_atomic_get_old_cdclk_state(state); 2508 2509 new_cdclk_state->active_pipes = 2510 intel_calc_active_pipes(state, old_cdclk_state->active_pipes); 2511 2512 ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state); 2513 if (ret) 2514 return ret; 2515 2516 if (intel_cdclk_changed(&old_cdclk_state->actual, 2517 &new_cdclk_state->actual)) { 2518 /* 2519 * Also serialize commits across all crtcs 2520 * if the actual hw needs to be poked. 
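 * (Serializing pulls every crtc into this state, so no other commit can
 * be reordered around the CDCLK change.)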
2521 */ 2522 ret = intel_atomic_serialize_global_state(&new_cdclk_state->base); 2523 if (ret) 2524 return ret; 2525 } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes || 2526 old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk || 2527 intel_cdclk_changed(&old_cdclk_state->logical, 2528 &new_cdclk_state->logical)) { 2529 ret = intel_atomic_lock_global_state(&new_cdclk_state->base); 2530 if (ret) 2531 return ret; 2532 } else { 2533 return 0; 2534 } 2535 2536 if (is_power_of_2(new_cdclk_state->active_pipes) && 2537 intel_cdclk_can_cd2x_update(dev_priv, 2538 &old_cdclk_state->actual, 2539 &new_cdclk_state->actual)) { 2540 struct intel_crtc *crtc; 2541 struct intel_crtc_state *crtc_state; 2542 2543 pipe = ilog2(new_cdclk_state->active_pipes); 2544 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 2545 2546 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 2547 if (IS_ERR(crtc_state)) 2548 return PTR_ERR(crtc_state); 2549 2550 if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) 2551 pipe = INVALID_PIPE; 2552 } else { 2553 pipe = INVALID_PIPE; 2554 } 2555 2556 if (pipe != INVALID_PIPE) { 2557 new_cdclk_state->pipe = pipe; 2558 2559 drm_dbg_kms(&dev_priv->drm, 2560 "Can change cdclk with pipe %c active\n", 2561 pipe_name(pipe)); 2562 } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual, 2563 &new_cdclk_state->actual)) { 2564 /* All pipes must be switched off while we change the cdclk. */ 2565 ret = intel_modeset_all_pipes(state); 2566 if (ret) 2567 return ret; 2568 2569 new_cdclk_state->pipe = INVALID_PIPE; 2570 2571 drm_dbg_kms(&dev_priv->drm, 2572 "Modeset required for cdclk change\n"); 2573 } 2574 2575 drm_dbg_kms(&dev_priv->drm, 2576 "New cdclk calculated to be logical %u kHz, actual %u kHz\n", 2577 new_cdclk_state->logical.cdclk, 2578 new_cdclk_state->actual.cdclk); 2579 drm_dbg_kms(&dev_priv->drm, 2580 "New voltage level calculated to be logical %u, actual %u\n", 2581 new_cdclk_state->logical.voltage_level, 2582 new_cdclk_state->actual.voltage_level); 2583 2584 return 0; 2585 } 2586 2587 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) 2588 { 2589 int max_cdclk_freq = dev_priv->max_cdclk_freq; 2590 2591 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 2592 return 2 * max_cdclk_freq; 2593 else if (IS_GEN(dev_priv, 9) || 2594 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2595 return max_cdclk_freq; 2596 else if (IS_CHERRYVIEW(dev_priv)) 2597 return max_cdclk_freq*95/100; 2598 else if (INTEL_GEN(dev_priv) < 4) 2599 return 2*max_cdclk_freq*90/100; 2600 else 2601 return max_cdclk_freq*90/100; 2602 } 2603 2604 /** 2605 * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency 2606 * @dev_priv: i915 device 2607 * 2608 * Determine the maximum CDCLK frequency the platform supports, and also 2609 * derive the maximum dot clock frequency the maximum CDCLK frequency 2610 * allows.
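 *
 * The results are stored in dev_priv->max_cdclk_freq and
 * dev_priv->max_dotclk_freq, both in kHz.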
2611 */ 2612 void intel_update_max_cdclk(struct drm_i915_private *dev_priv) 2613 { 2614 if (IS_JSL_EHL(dev_priv)) { 2615 if (dev_priv->cdclk.hw.ref == 24000) 2616 dev_priv->max_cdclk_freq = 552000; 2617 else 2618 dev_priv->max_cdclk_freq = 556800; 2619 } else if (INTEL_GEN(dev_priv) >= 11) { 2620 if (dev_priv->cdclk.hw.ref == 24000) 2621 dev_priv->max_cdclk_freq = 648000; 2622 else 2623 dev_priv->max_cdclk_freq = 652800; 2624 } else if (IS_CANNONLAKE(dev_priv)) { 2625 dev_priv->max_cdclk_freq = 528000; 2626 } else if (IS_GEN9_BC(dev_priv)) { 2627 u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 2628 int max_cdclk, vco; 2629 2630 vco = dev_priv->skl_preferred_vco_freq; 2631 drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); 2632 2633 /* 2634 * Use the lower (vco 8640) cdclk values as a 2635 * first guess. skl_calc_cdclk() will correct it 2636 * if the preferred vco is 8100 instead. 2637 */ 2638 if (limit == SKL_DFSM_CDCLK_LIMIT_675) 2639 max_cdclk = 617143; 2640 else if (limit == SKL_DFSM_CDCLK_LIMIT_540) 2641 max_cdclk = 540000; 2642 else if (limit == SKL_DFSM_CDCLK_LIMIT_450) 2643 max_cdclk = 432000; 2644 else 2645 max_cdclk = 308571; 2646 2647 dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); 2648 } else if (IS_GEMINILAKE(dev_priv)) { 2649 dev_priv->max_cdclk_freq = 316800; 2650 } else if (IS_BROXTON(dev_priv)) { 2651 dev_priv->max_cdclk_freq = 624000; 2652 } else if (IS_BROADWELL(dev_priv)) { 2653 /* 2654 * FIXME with extra cooling we can allow 2655 * 540 MHz for ULX and 675 Mhz for ULT. 2656 * How can we know if extra cooling is 2657 * available? PCI ID, VTB, something else? 2658 */ 2659 if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) 2660 dev_priv->max_cdclk_freq = 450000; 2661 else if (IS_BDW_ULX(dev_priv)) 2662 dev_priv->max_cdclk_freq = 450000; 2663 else if (IS_BDW_ULT(dev_priv)) 2664 dev_priv->max_cdclk_freq = 540000; 2665 else 2666 dev_priv->max_cdclk_freq = 675000; 2667 } else if (IS_CHERRYVIEW(dev_priv)) { 2668 dev_priv->max_cdclk_freq = 320000; 2669 } else if (IS_VALLEYVIEW(dev_priv)) { 2670 dev_priv->max_cdclk_freq = 400000; 2671 } else { 2672 /* otherwise assume cdclk is fixed */ 2673 dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk; 2674 } 2675 2676 dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); 2677 2678 drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", 2679 dev_priv->max_cdclk_freq); 2680 2681 drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", 2682 dev_priv->max_dotclk_freq); 2683 } 2684 2685 /** 2686 * intel_update_cdclk - Determine the current CDCLK frequency 2687 * @dev_priv: i915 device 2688 * 2689 * Determine the current CDCLK frequency. 2690 */ 2691 void intel_update_cdclk(struct drm_i915_private *dev_priv) 2692 { 2693 dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw); 2694 2695 /* 2696 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): 2697 * Programmng [sic] note: bit[9:2] should be programmed to the number 2698 * of cdclk that generates 4MHz reference clock freq which is used to 2699 * generate GMBus clock. This will vary with the cdclk freq. 2700 */ 2701 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2702 intel_de_write(dev_priv, GMBUSFREQ_VLV, 2703 DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); 2704 } 2705 2706 static int dg1_rawclk(struct drm_i915_private *dev_priv) 2707 { 2708 /* 2709 * DG1 always uses a 38.4 MHz rawclk. The bspec tells us 2710 * "Program Numerator=2, Denominator=4, Divider=37 decimal." 
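 * Those values are written verbatim to PCH_RAWCLK_FREQ below, and the
 * frequency is reported back as 38400 kHz.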
2711 */ 2712 intel_de_write(dev_priv, PCH_RAWCLK_FREQ, 2713 CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); 2714 2715 return 38400; 2716 } 2717 2718 static int cnp_rawclk(struct drm_i915_private *dev_priv) 2719 { 2720 u32 rawclk; 2721 int divider, fraction; 2722 2723 if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { 2724 /* 24 MHz */ 2725 divider = 24000; 2726 fraction = 0; 2727 } else { 2728 /* 19.2 MHz */ 2729 divider = 19000; 2730 fraction = 200; 2731 } 2732 2733 rawclk = CNP_RAWCLK_DIV(divider / 1000); 2734 if (fraction) { 2735 int numerator = 1; 2736 2737 rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000, 2738 fraction) - 1); 2739 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2740 rawclk |= ICP_RAWCLK_NUM(numerator); 2741 } 2742 2743 intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk); 2744 return divider + fraction; 2745 } 2746 2747 static int pch_rawclk(struct drm_i915_private *dev_priv) 2748 { 2749 return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; 2750 } 2751 2752 static int vlv_hrawclk(struct drm_i915_private *dev_priv) 2753 { 2754 /* RAWCLK_FREQ_VLV register updated from power well code */ 2755 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", 2756 CCK_DISPLAY_REF_CLOCK_CONTROL); 2757 } 2758 2759 static int i9xx_hrawclk(struct drm_i915_private *dev_priv) 2760 { 2761 u32 clkcfg; 2762 2763 /* 2764 * hrawclock is 1/4 the FSB frequency 2765 * 2766 * Note that this only reads the state of the FSB 2767 * straps, not the actual FSB frequency. Some BIOSen 2768 * let you configure each independently. Ideally we'd 2769 * read out the actual FSB frequency but sadly we 2770 * don't know which registers have that information, 2771 * and all the relevant docs have gone to bit heaven :( 2772 */ 2773 clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK; 2774 2775 if (IS_MOBILE(dev_priv)) { 2776 switch (clkcfg) { 2777 case CLKCFG_FSB_400: 2778 return 100000; 2779 case CLKCFG_FSB_533: 2780 return 133333; 2781 case CLKCFG_FSB_667: 2782 return 166667; 2783 case CLKCFG_FSB_800: 2784 return 200000; 2785 case CLKCFG_FSB_1067: 2786 return 266667; 2787 case CLKCFG_FSB_1333: 2788 return 333333; 2789 default: 2790 MISSING_CASE(clkcfg); 2791 return 133333; 2792 } 2793 } else { 2794 switch (clkcfg) { 2795 case CLKCFG_FSB_400_ALT: 2796 return 100000; 2797 case CLKCFG_FSB_533: 2798 return 133333; 2799 case CLKCFG_FSB_667: 2800 return 166667; 2801 case CLKCFG_FSB_800: 2802 return 200000; 2803 case CLKCFG_FSB_1067_ALT: 2804 return 266667; 2805 case CLKCFG_FSB_1333_ALT: 2806 return 333333; 2807 case CLKCFG_FSB_1600_ALT: 2808 return 400000; 2809 default: 2810 return 133333; 2811 } 2812 } 2813 } 2814 2815 /** 2816 * intel_read_rawclk - Determine the current RAWCLK frequency 2817 * @dev_priv: i915 device 2818 * 2819 * Determine the current RAWCLK frequency. RAWCLK is a fixed 2820 * frequency clock so this needs to be done only once.
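 * The frequency is returned in kHz; a return value of 0 means the
 * platform either has no RAWCLK or we have no need to know it.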
2821 */ 2822 u32 intel_read_rawclk(struct drm_i915_private *dev_priv) 2823 { 2824 u32 freq; 2825 2826 if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) 2827 freq = dg1_rawclk(dev_priv); 2828 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 2829 freq = cnp_rawclk(dev_priv); 2830 else if (HAS_PCH_SPLIT(dev_priv)) 2831 freq = pch_rawclk(dev_priv); 2832 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2833 freq = vlv_hrawclk(dev_priv); 2834 else if (INTEL_GEN(dev_priv) >= 3) 2835 freq = i9xx_hrawclk(dev_priv); 2836 else 2837 /* no rawclk on other platforms, or no need to know it */ 2838 return 0; 2839 2840 return freq; 2841 } 2842 2843 /** 2844 * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks 2845 * @dev_priv: i915 device 2846 */ 2847 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) 2848 { 2849 if (IS_ROCKETLAKE(dev_priv)) { 2850 dev_priv->display.set_cdclk = bxt_set_cdclk; 2851 dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; 2852 dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; 2853 dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; 2854 dev_priv->cdclk.table = rkl_cdclk_table; 2855 } else if (INTEL_GEN(dev_priv) >= 12) { 2856 dev_priv->display.set_cdclk = bxt_set_cdclk; 2857 dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; 2858 dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; 2859 dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; 2860 dev_priv->cdclk.table = icl_cdclk_table; 2861 } else if (IS_JSL_EHL(dev_priv)) { 2862 dev_priv->display.set_cdclk = bxt_set_cdclk; 2863 dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; 2864 dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; 2865 dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; 2866 dev_priv->cdclk.table = icl_cdclk_table; 2867 } else if (INTEL_GEN(dev_priv) >= 11) { 2868 dev_priv->display.set_cdclk = bxt_set_cdclk; 2869 dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; 2870 dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; 2871 dev_priv->display.calc_voltage_level = icl_calc_voltage_level; 2872 dev_priv->cdclk.table = icl_cdclk_table; 2873 } else if (IS_CANNONLAKE(dev_priv)) { 2874 dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; 2875 dev_priv->display.set_cdclk = bxt_set_cdclk; 2876 dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; 2877 dev_priv->display.calc_voltage_level = cnl_calc_voltage_level; 2878 dev_priv->cdclk.table = cnl_cdclk_table; 2879 } else if (IS_GEN9_LP(dev_priv)) { 2880 dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; 2881 dev_priv->display.set_cdclk = bxt_set_cdclk; 2882 dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; 2883 dev_priv->display.calc_voltage_level = bxt_calc_voltage_level; 2884 if (IS_GEMINILAKE(dev_priv)) 2885 dev_priv->cdclk.table = glk_cdclk_table; 2886 else 2887 dev_priv->cdclk.table = bxt_cdclk_table; 2888 } else if (IS_GEN9_BC(dev_priv)) { 2889 dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; 2890 dev_priv->display.set_cdclk = skl_set_cdclk; 2891 dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; 2892 } else if (IS_BROADWELL(dev_priv)) { 2893 dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; 2894 dev_priv->display.set_cdclk = bdw_set_cdclk; 2895 dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk; 2896 } else if (IS_CHERRYVIEW(dev_priv)) { 2897 dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; 2898 dev_priv->display.set_cdclk 
= chv_set_cdclk; 2899 dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; 2900 } else if (IS_VALLEYVIEW(dev_priv)) { 2901 dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; 2902 dev_priv->display.set_cdclk = vlv_set_cdclk; 2903 dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; 2904 } else { 2905 dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; 2906 dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk; 2907 } 2908 2909 if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv)) 2910 dev_priv->display.get_cdclk = bxt_get_cdclk; 2911 else if (IS_GEN9_BC(dev_priv)) 2912 dev_priv->display.get_cdclk = skl_get_cdclk; 2913 else if (IS_BROADWELL(dev_priv)) 2914 dev_priv->display.get_cdclk = bdw_get_cdclk; 2915 else if (IS_HASWELL(dev_priv)) 2916 dev_priv->display.get_cdclk = hsw_get_cdclk; 2917 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2918 dev_priv->display.get_cdclk = vlv_get_cdclk; 2919 else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 2920 dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk; 2921 else if (IS_GEN(dev_priv, 5)) 2922 dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk; 2923 else if (IS_GM45(dev_priv)) 2924 dev_priv->display.get_cdclk = gm45_get_cdclk; 2925 else if (IS_G45(dev_priv)) 2926 dev_priv->display.get_cdclk = g33_get_cdclk; 2927 else if (IS_I965GM(dev_priv)) 2928 dev_priv->display.get_cdclk = i965gm_get_cdclk; 2929 else if (IS_I965G(dev_priv)) 2930 dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk; 2931 else if (IS_PINEVIEW(dev_priv)) 2932 dev_priv->display.get_cdclk = pnv_get_cdclk; 2933 else if (IS_G33(dev_priv)) 2934 dev_priv->display.get_cdclk = g33_get_cdclk; 2935 else if (IS_I945GM(dev_priv)) 2936 dev_priv->display.get_cdclk = i945gm_get_cdclk; 2937 else if (IS_I945G(dev_priv)) 2938 dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk; 2939 else if (IS_I915GM(dev_priv)) 2940 dev_priv->display.get_cdclk = i915gm_get_cdclk; 2941 else if (IS_I915G(dev_priv)) 2942 dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk; 2943 else if (IS_I865G(dev_priv)) 2944 dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk; 2945 else if (IS_I85X(dev_priv)) 2946 dev_priv->display.get_cdclk = i85x_get_cdclk; 2947 else if (IS_I845G(dev_priv)) 2948 dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk; 2949 else if (IS_I830(dev_priv)) 2950 dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk; 2951 2952 if (drm_WARN(&dev_priv->drm, !dev_priv->display.get_cdclk, 2953 "Unknown platform. Assuming 133 MHz CDCLK\n")) 2954 dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk; 2955 } 2956