1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_dpio_phy.h"
25 #include "intel_dpll_mgr.h"
26 #include "intel_drv.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, a PLL can be requested for a
40  * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
41  * a previously used PLL can be released with intel_release_shared_dpll().
42  * Changes to the users are first staged in the atomic state, and then made
43  * effective by calling intel_shared_dpll_swap_state() during the atomic
44  * commit phase.
45  */
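
/*
 * A rough sketch of the expected call order over an atomic modeset
 * (argument lists abbreviated, error handling omitted; this merely
 * illustrates the description above):
 *
 *   intel_shared_dpll_init(dev);               <- once, at driver load
 *
 *   compute phase:
 *     pll = intel_get_shared_dpll(crtc_state, encoder);
 *
 *   commit phase:
 *     intel_shared_dpll_swap_state(state);
 *     intel_prepare_shared_dpll(crtc_state);
 *     intel_enable_shared_dpll(crtc_state);
 *     ...
 *     intel_disable_shared_dpll(crtc_state);
 *
 *   and intel_release_shared_dpll() when a CRTC stops using its PLL.
 */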
46 
47 static void
48 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
49 				  struct intel_shared_dpll_state *shared_dpll)
50 {
51 	enum intel_dpll_id i;
52 
53 	/* Copy shared dpll state */
54 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
55 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
56 
57 		shared_dpll[i] = pll->state;
58 	}
59 }
60 
61 static struct intel_shared_dpll_state *
62 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
63 {
64 	struct intel_atomic_state *state = to_intel_atomic_state(s);
65 
66 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
67 
68 	if (!state->dpll_set) {
69 		state->dpll_set = true;
70 
71 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
72 						  state->shared_dpll);
73 	}
74 
75 	return state->shared_dpll;
76 }
77 
78 /**
79  * intel_get_shared_dpll_by_id - get a DPLL given its id
80  * @dev_priv: i915 device instance
81  * @id: pll id
82  *
83  * Returns:
84  * A pointer to the DPLL with @id
85  */
86 struct intel_shared_dpll *
87 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
88 			    enum intel_dpll_id id)
89 {
90 	return &dev_priv->shared_dplls[id];
91 }
92 
93 /**
94  * intel_get_shared_dpll_id - get the id of a DPLL
95  * @dev_priv: i915 device instance
96  * @pll: the DPLL
97  *
98  * Returns:
99  * The id of @pll
100  */
101 enum intel_dpll_id
102 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
103 			 struct intel_shared_dpll *pll)
104 {
	if (WARN_ON(pll < dev_priv->shared_dplls ||
106 		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
107 		return -1;
108 
109 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
110 }
111 
112 /* For ILK+ */
113 void assert_shared_dpll(struct drm_i915_private *dev_priv,
114 			struct intel_shared_dpll *pll,
115 			bool state)
116 {
117 	bool cur_state;
118 	struct intel_dpll_hw_state hw_state;
119 
120 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
121 		return;
122 
123 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
124 	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
126 			pll->info->name, onoff(state), onoff(cur_state));
127 }
128 
129 /**
130  * intel_prepare_shared_dpll - call a dpll's prepare hook
131  * @crtc_state: CRTC, and its state, which has a shared dpll
132  *
133  * This calls the PLL's prepare hook if it has one and if the PLL is not
134  * already enabled. The prepare hook is platform specific.
135  */
136 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
137 {
138 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
139 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
140 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
141 
142 	if (WARN_ON(pll == NULL))
143 		return;
144 
145 	mutex_lock(&dev_priv->dpll_lock);
146 	WARN_ON(!pll->state.crtc_mask);
147 	if (!pll->active_mask) {
148 		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
149 		WARN_ON(pll->on);
150 		assert_shared_dpll_disabled(dev_priv, pll);
151 
152 		pll->info->funcs->prepare(dev_priv, pll);
153 	}
154 	mutex_unlock(&dev_priv->dpll_lock);
155 }
156 
157 /**
158  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
159  * @crtc_state: CRTC, and its state, which has a shared DPLL
160  *
 * Enable the shared DPLL used by the CRTC of @crtc_state.
162  */
163 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
164 {
165 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
166 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
167 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
168 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
169 	unsigned int old_mask;
170 
171 	if (WARN_ON(pll == NULL))
172 		return;
173 
174 	mutex_lock(&dev_priv->dpll_lock);
175 	old_mask = pll->active_mask;
176 
177 	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
178 	    WARN_ON(pll->active_mask & crtc_mask))
179 		goto out;
180 
181 	pll->active_mask |= crtc_mask;
182 
183 	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
184 		      pll->info->name, pll->active_mask, pll->on,
185 		      crtc->base.base.id);
186 
187 	if (old_mask) {
188 		WARN_ON(!pll->on);
189 		assert_shared_dpll_enabled(dev_priv, pll);
190 		goto out;
191 	}
192 	WARN_ON(pll->on);
193 
194 	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
195 	pll->info->funcs->enable(dev_priv, pll);
196 	pll->on = true;
197 
198 out:
199 	mutex_unlock(&dev_priv->dpll_lock);
200 }
201 
202 /**
203  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
204  * @crtc_state: CRTC, and its state, which has a shared DPLL
205  *
 * Disable the shared DPLL used by the CRTC of @crtc_state.
207  */
208 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
209 {
210 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
211 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
212 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
213 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
214 
215 	/* PCH only available on ILK+ */
216 	if (INTEL_GEN(dev_priv) < 5)
217 		return;
218 
219 	if (pll == NULL)
220 		return;
221 
222 	mutex_lock(&dev_priv->dpll_lock);
223 	if (WARN_ON(!(pll->active_mask & crtc_mask)))
224 		goto out;
225 
226 	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
227 		      pll->info->name, pll->active_mask, pll->on,
228 		      crtc->base.base.id);
229 
230 	assert_shared_dpll_enabled(dev_priv, pll);
231 	WARN_ON(!pll->on);
232 
233 	pll->active_mask &= ~crtc_mask;
234 	if (pll->active_mask)
235 		goto out;
236 
237 	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
238 	pll->info->funcs->disable(dev_priv, pll);
239 	pll->on = false;
240 
241 out:
242 	mutex_unlock(&dev_priv->dpll_lock);
243 }
244 
245 static struct intel_shared_dpll *
246 intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
247 		       enum intel_dpll_id range_min,
248 		       enum intel_dpll_id range_max)
249 {
250 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
251 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
252 	struct intel_shared_dpll *pll, *unused_pll = NULL;
253 	struct intel_shared_dpll_state *shared_dpll;
254 	enum intel_dpll_id i;
255 
256 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
257 
258 	for (i = range_min; i <= range_max; i++) {
259 		pll = &dev_priv->shared_dplls[i];
260 
		/* Only check in-use PLLs first; remember the first unused one */
262 		if (shared_dpll[i].crtc_mask == 0) {
263 			if (!unused_pll)
264 				unused_pll = pll;
265 			continue;
266 		}
267 
268 		if (memcmp(&crtc_state->dpll_hw_state,
269 			   &shared_dpll[i].hw_state,
270 			   sizeof(crtc_state->dpll_hw_state)) == 0) {
271 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
272 				      crtc->base.base.id, crtc->base.name,
273 				      pll->info->name,
274 				      shared_dpll[i].crtc_mask,
275 				      pll->active_mask);
276 			return pll;
277 		}
278 	}
279 
280 	/* Ok no matching timings, maybe there's a free one? */
281 	if (unused_pll) {
282 		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
283 			      crtc->base.base.id, crtc->base.name,
284 			      unused_pll->info->name);
285 		return unused_pll;
286 	}
287 
288 	return NULL;
289 }
290 
291 static void
292 intel_reference_shared_dpll(struct intel_shared_dpll *pll,
293 			    struct intel_crtc_state *crtc_state)
294 {
295 	struct intel_shared_dpll_state *shared_dpll;
296 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
297 	const enum intel_dpll_id id = pll->info->id;
298 
299 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
300 
301 	if (shared_dpll[id].crtc_mask == 0)
302 		shared_dpll[id].hw_state =
303 			crtc_state->dpll_hw_state;
304 
305 	crtc_state->shared_dpll = pll;
306 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
307 			 pipe_name(crtc->pipe));
308 
309 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
310 }
311 
312 /**
313  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
314  * @state: atomic state
315  *
316  * This is the dpll version of drm_atomic_helper_swap_state() since the
317  * helper does not handle driver-specific global state.
318  *
319  * For consistency with atomic helpers this function does a complete swap,
320  * i.e. it also puts the current state into @state, even though there is no
321  * need for that at this moment.
322  */
323 void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
324 {
325 	struct drm_i915_private *dev_priv = to_i915(state->dev);
326 	struct intel_shared_dpll_state *shared_dpll;
327 	struct intel_shared_dpll *pll;
328 	enum intel_dpll_id i;
329 
330 	if (!to_intel_atomic_state(state)->dpll_set)
331 		return;
332 
333 	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
334 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
335 		struct intel_shared_dpll_state tmp;
336 
337 		pll = &dev_priv->shared_dplls[i];
338 
339 		tmp = pll->state;
340 		pll->state = shared_dpll[i];
341 		shared_dpll[i] = tmp;
342 	}
343 }
344 
345 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
346 				      struct intel_shared_dpll *pll,
347 				      struct intel_dpll_hw_state *hw_state)
348 {
349 	const enum intel_dpll_id id = pll->info->id;
350 	intel_wakeref_t wakeref;
351 	u32 val;
352 
353 	wakeref = intel_display_power_get_if_enabled(dev_priv,
354 						     POWER_DOMAIN_DISPLAY_CORE);
355 	if (!wakeref)
356 		return false;
357 
358 	val = I915_READ(PCH_DPLL(id));
359 	hw_state->dpll = val;
360 	hw_state->fp0 = I915_READ(PCH_FP0(id));
361 	hw_state->fp1 = I915_READ(PCH_FP1(id));
362 
363 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
364 
365 	return val & DPLL_VCO_ENABLE;
366 }
367 
368 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
369 				 struct intel_shared_dpll *pll)
370 {
371 	const enum intel_dpll_id id = pll->info->id;
372 
373 	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
374 	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
375 }
376 
377 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
378 {
379 	u32 val;
380 	bool enabled;
381 
382 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
383 
384 	val = I915_READ(PCH_DREF_CONTROL);
385 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
386 			    DREF_SUPERSPREAD_SOURCE_MASK));
387 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
388 }
389 
390 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
391 				struct intel_shared_dpll *pll)
392 {
393 	const enum intel_dpll_id id = pll->info->id;
394 
395 	/* PCH refclock must be enabled first */
396 	ibx_assert_pch_refclk_enabled(dev_priv);
397 
398 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
399 
400 	/* Wait for the clocks to stabilize. */
401 	POSTING_READ(PCH_DPLL(id));
402 	udelay(150);
403 
404 	/* The pixel multiplier can only be updated once the
405 	 * DPLL is enabled and the clocks are stable.
406 	 *
407 	 * So write it again.
408 	 */
409 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
410 	POSTING_READ(PCH_DPLL(id));
411 	udelay(200);
412 }
413 
414 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
415 				 struct intel_shared_dpll *pll)
416 {
417 	const enum intel_dpll_id id = pll->info->id;
418 
419 	I915_WRITE(PCH_DPLL(id), 0);
420 	POSTING_READ(PCH_DPLL(id));
421 	udelay(200);
422 }
423 
424 static struct intel_shared_dpll *
425 ibx_get_dpll(struct intel_crtc_state *crtc_state,
426 	     struct intel_encoder *encoder)
427 {
428 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
429 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
430 	struct intel_shared_dpll *pll;
431 	enum intel_dpll_id i;
432 
433 	if (HAS_PCH_IBX(dev_priv)) {
434 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
435 		i = (enum intel_dpll_id) crtc->pipe;
436 		pll = &dev_priv->shared_dplls[i];
437 
438 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
439 			      crtc->base.base.id, crtc->base.name,
440 			      pll->info->name);
441 	} else {
442 		pll = intel_find_shared_dpll(crtc_state,
443 					     DPLL_ID_PCH_PLL_A,
444 					     DPLL_ID_PCH_PLL_B);
445 	}
446 
447 	if (!pll)
448 		return NULL;
449 
450 	/* reference the pll */
451 	intel_reference_shared_dpll(pll, crtc_state);
452 
453 	return pll;
454 }
455 
456 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
457 			      const struct intel_dpll_hw_state *hw_state)
458 {
459 	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
460 		      "fp0: 0x%x, fp1: 0x%x\n",
461 		      hw_state->dpll,
462 		      hw_state->dpll_md,
463 		      hw_state->fp0,
464 		      hw_state->fp1);
465 }
466 
467 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
468 	.prepare = ibx_pch_dpll_prepare,
469 	.enable = ibx_pch_dpll_enable,
470 	.disable = ibx_pch_dpll_disable,
471 	.get_hw_state = ibx_pch_dpll_get_hw_state,
472 };
473 
474 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
475 			       struct intel_shared_dpll *pll)
476 {
477 	const enum intel_dpll_id id = pll->info->id;
478 
479 	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
480 	POSTING_READ(WRPLL_CTL(id));
481 	udelay(20);
482 }
483 
484 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
485 				struct intel_shared_dpll *pll)
486 {
487 	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
488 	POSTING_READ(SPLL_CTL);
489 	udelay(20);
490 }
491 
492 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
493 				  struct intel_shared_dpll *pll)
494 {
495 	const enum intel_dpll_id id = pll->info->id;
496 	u32 val;
497 
498 	val = I915_READ(WRPLL_CTL(id));
499 	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
500 	POSTING_READ(WRPLL_CTL(id));
501 }
502 
503 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
504 				 struct intel_shared_dpll *pll)
505 {
506 	u32 val;
507 
508 	val = I915_READ(SPLL_CTL);
509 	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
510 	POSTING_READ(SPLL_CTL);
511 }
512 
513 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
514 				       struct intel_shared_dpll *pll,
515 				       struct intel_dpll_hw_state *hw_state)
516 {
517 	const enum intel_dpll_id id = pll->info->id;
518 	intel_wakeref_t wakeref;
519 	u32 val;
520 
521 	wakeref = intel_display_power_get_if_enabled(dev_priv,
522 						     POWER_DOMAIN_DISPLAY_CORE);
523 	if (!wakeref)
524 		return false;
525 
526 	val = I915_READ(WRPLL_CTL(id));
527 	hw_state->wrpll = val;
528 
529 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
530 
531 	return val & WRPLL_PLL_ENABLE;
532 }
533 
534 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
535 				      struct intel_shared_dpll *pll,
536 				      struct intel_dpll_hw_state *hw_state)
537 {
538 	intel_wakeref_t wakeref;
539 	u32 val;
540 
541 	wakeref = intel_display_power_get_if_enabled(dev_priv,
542 						     POWER_DOMAIN_DISPLAY_CORE);
543 	if (!wakeref)
544 		return false;
545 
546 	val = I915_READ(SPLL_CTL);
547 	hw_state->spll = val;
548 
549 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
550 
551 	return val & SPLL_PLL_ENABLE;
552 }
553 
554 #define LC_FREQ 2700
555 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
556 
557 #define P_MIN 2
558 #define P_MAX 64
559 #define P_INC 2
560 
561 /* Constraints for PLL good behavior */
562 #define REF_MIN 48
563 #define REF_MAX 400
564 #define VCO_MIN 2400
565 #define VCO_MAX 4800
566 
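/*
 * Note: r2 and n2 below hold 2*R and 2*N, i.e. twice the actual reference
 * and feedback dividers; these doubled values are what the
 * WRPLL_DIVIDER_REFERENCE/WRPLL_DIVIDER_FEEDBACK fields are programmed with
 * (see hsw_ddi_hdmi_get_dpll()), and they allow half-integer R and N steps.
 */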
567 struct hsw_wrpll_rnp {
568 	unsigned p, n2, r2;
569 };
570 
571 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
572 {
573 	unsigned budget;
574 
575 	switch (clock) {
576 	case 25175000:
577 	case 25200000:
578 	case 27000000:
579 	case 27027000:
580 	case 37762500:
581 	case 37800000:
582 	case 40500000:
583 	case 40541000:
584 	case 54000000:
585 	case 54054000:
586 	case 59341000:
587 	case 59400000:
588 	case 72000000:
589 	case 74176000:
590 	case 74250000:
591 	case 81000000:
592 	case 81081000:
593 	case 89012000:
594 	case 89100000:
595 	case 108000000:
596 	case 108108000:
597 	case 111264000:
598 	case 111375000:
599 	case 148352000:
600 	case 148500000:
601 	case 162000000:
602 	case 162162000:
603 	case 222525000:
604 	case 222750000:
605 	case 296703000:
606 	case 297000000:
607 		budget = 0;
608 		break;
609 	case 233500000:
610 	case 245250000:
611 	case 247750000:
612 	case 253250000:
613 	case 298000000:
614 		budget = 1500;
615 		break;
616 	case 169128000:
617 	case 169500000:
618 	case 179500000:
619 	case 202000000:
620 		budget = 2000;
621 		break;
622 	case 256250000:
623 	case 262500000:
624 	case 270000000:
625 	case 272500000:
626 	case 273750000:
627 	case 280750000:
628 	case 281250000:
629 	case 286000000:
630 	case 291750000:
631 		budget = 4000;
632 		break;
633 	case 267250000:
634 	case 268500000:
635 		budget = 5000;
636 		break;
637 	default:
638 		budget = 1000;
639 		break;
640 	}
641 
642 	return budget;
643 }
644 
645 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
646 				 unsigned int r2, unsigned int n2,
647 				 unsigned int p,
648 				 struct hsw_wrpll_rnp *best)
649 {
650 	u64 a, b, c, d, diff, diff_best;
651 
652 	/* No best (r,n,p) yet */
653 	if (best->p == 0) {
654 		best->p = p;
655 		best->n2 = n2;
656 		best->r2 = r2;
657 		return;
658 	}
659 
660 	/*
661 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
662 	 * freq2k.
663 	 *
664 	 * delta = 1e6 *
665 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
666 	 *	   freq2k;
667 	 *
668 	 * and we would like delta <= budget.
669 	 *
670 	 * If the discrepancy is above the PPM-based budget, always prefer to
671 	 * improve upon the previous solution.  However, if you're within the
672 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
673 	 */
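	/*
	 * To stay in integer math the comparison above is rearranged:
	 * a/b are budget * freq2k * (p * r2) for the candidate/best solution
	 * and c/d are 1e6 * |freq2k * p * r2 - LC_FREQ_2K * n2|, so
	 * "a < c" means the candidate exceeds the budget.
	 */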
674 	a = freq2k * budget * p * r2;
675 	b = freq2k * budget * best->p * best->r2;
676 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
677 	diff_best = abs_diff(freq2k * best->p * best->r2,
678 			     LC_FREQ_2K * best->n2);
679 	c = 1000000 * diff;
680 	d = 1000000 * diff_best;
681 
682 	if (a < c && b < d) {
683 		/* If both are above the budget, pick the closer */
684 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
685 			best->p = p;
686 			best->n2 = n2;
687 			best->r2 = r2;
688 		}
689 	} else if (a >= c && b < d) {
		/* A is within the budget but B is not, so take the new values. */
691 		best->p = p;
692 		best->n2 = n2;
693 		best->r2 = r2;
694 	} else if (a >= c && b >= d) {
695 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
696 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
697 			best->p = p;
698 			best->n2 = n2;
699 			best->r2 = r2;
700 		}
701 	}
702 	/* Otherwise a < c && b >= d, do nothing */
703 }
704 
705 static void
706 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
707 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
708 {
709 	u64 freq2k;
710 	unsigned p, n2, r2;
711 	struct hsw_wrpll_rnp best = { 0, 0, 0 };
712 	unsigned budget;
713 
714 	freq2k = clock / 100;
715 
716 	budget = hsw_wrpll_get_budget_for_freq(clock);
717 
	/* Special case handling for 540 MHz pixel clock: bypass WR PLL entirely
719 	 * and directly pass the LC PLL to it. */
720 	if (freq2k == 5400000) {
721 		*n2_out = 2;
722 		*p_out = 1;
723 		*r2_out = 2;
724 		return;
725 	}
726 
727 	/*
728 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
729 	 * the WR PLL.
730 	 *
731 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
732 	 * Injecting R2 = 2 * R gives:
733 	 *   REF_MAX * r2 > LC_FREQ * 2 and
734 	 *   REF_MIN * r2 < LC_FREQ * 2
735 	 *
736 	 * Which means the desired boundaries for r2 are:
737 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
738 	 *
739 	 */
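	/*
	 * With LC_FREQ = 2700, REF_MAX = 400 and REF_MIN = 48 this works
	 * out to r2 ranging from 14 to 112.
	 */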
740 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
741 	     r2 <= LC_FREQ * 2 / REF_MIN;
742 	     r2++) {
743 
744 		/*
745 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
746 		 *
747 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
748 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
749 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ
751 		 *
752 		 * Which means the desired boundaries for n2 are:
753 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
754 		 */
755 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
756 		     n2 <= VCO_MAX * r2 / LC_FREQ;
757 		     n2++) {
758 
759 			for (p = P_MIN; p <= P_MAX; p += P_INC)
760 				hsw_wrpll_update_rnp(freq2k, budget,
761 						     r2, n2, p, &best);
762 		}
763 	}
764 
765 	*n2_out = best.n2;
766 	*p_out = best.p;
767 	*r2_out = best.r2;
768 }
769 
770 static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
771 {
772 	struct intel_shared_dpll *pll;
773 	u32 val;
774 	unsigned int p, n2, r2;
775 
776 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
777 
778 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
779 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
780 	      WRPLL_DIVIDER_POST(p);
781 
782 	crtc_state->dpll_hw_state.wrpll = val;
783 
784 	pll = intel_find_shared_dpll(crtc_state,
785 				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
786 
787 	if (!pll)
788 		return NULL;
789 
790 	return pll;
791 }
792 
793 static struct intel_shared_dpll *
794 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
795 {
796 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
797 	struct intel_shared_dpll *pll;
798 	enum intel_dpll_id pll_id;
799 	int clock = crtc_state->port_clock;
800 
801 	switch (clock / 2) {
802 	case 81000:
803 		pll_id = DPLL_ID_LCPLL_810;
804 		break;
805 	case 135000:
806 		pll_id = DPLL_ID_LCPLL_1350;
807 		break;
808 	case 270000:
809 		pll_id = DPLL_ID_LCPLL_2700;
810 		break;
811 	default:
812 		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
813 		return NULL;
814 	}
815 
816 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
817 
818 	if (!pll)
819 		return NULL;
820 
821 	return pll;
822 }
823 
824 static struct intel_shared_dpll *
825 hsw_get_dpll(struct intel_crtc_state *crtc_state,
826 	     struct intel_encoder *encoder)
827 {
828 	struct intel_shared_dpll *pll;
829 
830 	memset(&crtc_state->dpll_hw_state, 0,
831 	       sizeof(crtc_state->dpll_hw_state));
832 
833 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
834 		pll = hsw_ddi_hdmi_get_dpll(crtc_state);
835 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
836 		pll = hsw_ddi_dp_get_dpll(crtc_state);
837 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
838 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
839 			return NULL;
840 
841 		crtc_state->dpll_hw_state.spll =
842 			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
843 
844 		pll = intel_find_shared_dpll(crtc_state,
845 					     DPLL_ID_SPLL, DPLL_ID_SPLL);
846 	} else {
847 		return NULL;
848 	}
849 
850 	if (!pll)
851 		return NULL;
852 
853 	intel_reference_shared_dpll(pll, crtc_state);
854 
855 	return pll;
856 }
857 
858 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
859 			      const struct intel_dpll_hw_state *hw_state)
860 {
861 	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
862 		      hw_state->wrpll, hw_state->spll);
863 }
864 
865 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
866 	.enable = hsw_ddi_wrpll_enable,
867 	.disable = hsw_ddi_wrpll_disable,
868 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
869 };
870 
871 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
872 	.enable = hsw_ddi_spll_enable,
873 	.disable = hsw_ddi_spll_disable,
874 	.get_hw_state = hsw_ddi_spll_get_hw_state,
875 };
876 
877 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
878 				 struct intel_shared_dpll *pll)
879 {
880 }
881 
882 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
883 				  struct intel_shared_dpll *pll)
884 {
885 }
886 
887 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
888 				       struct intel_shared_dpll *pll,
889 				       struct intel_dpll_hw_state *hw_state)
890 {
891 	return true;
892 }
893 
894 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
895 	.enable = hsw_ddi_lcpll_enable,
896 	.disable = hsw_ddi_lcpll_disable,
897 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
898 };
899 
900 struct skl_dpll_regs {
901 	i915_reg_t ctl, cfgcr1, cfgcr2;
902 };
903 
904 /* this array is indexed by the *shared* pll id */
905 static const struct skl_dpll_regs skl_dpll_regs[4] = {
906 	{
907 		/* DPLL 0 */
908 		.ctl = LCPLL1_CTL,
909 		/* DPLL 0 doesn't support HDMI mode */
910 	},
911 	{
912 		/* DPLL 1 */
913 		.ctl = LCPLL2_CTL,
914 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
915 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
916 	},
917 	{
918 		/* DPLL 2 */
919 		.ctl = WRPLL_CTL(0),
920 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
921 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
922 	},
923 	{
924 		/* DPLL 3 */
925 		.ctl = WRPLL_CTL(1),
926 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
927 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
928 	},
929 };
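
/*
 * DPLL0 has no CFGCR registers above because it doesn't support HDMI mode:
 * its DP link rate is programmed solely through DPLL_CTRL1, and the PLL
 * itself is treated as always-on since it also drives CDCLK (see the
 * skl_ddi_dpll0_* helpers below).
 */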
930 
931 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
932 				    struct intel_shared_dpll *pll)
933 {
934 	const enum intel_dpll_id id = pll->info->id;
935 	u32 val;
936 
937 	val = I915_READ(DPLL_CTRL1);
938 
939 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
940 		 DPLL_CTRL1_SSC(id) |
941 		 DPLL_CTRL1_LINK_RATE_MASK(id));
942 	val |= pll->state.hw_state.ctrl1 << (id * 6);
943 
944 	I915_WRITE(DPLL_CTRL1, val);
945 	POSTING_READ(DPLL_CTRL1);
946 }
947 
948 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
949 			       struct intel_shared_dpll *pll)
950 {
951 	const struct skl_dpll_regs *regs = skl_dpll_regs;
952 	const enum intel_dpll_id id = pll->info->id;
953 
954 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
955 
956 	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
957 	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
958 	POSTING_READ(regs[id].cfgcr1);
959 	POSTING_READ(regs[id].cfgcr2);
960 
961 	/* the enable bit is always bit 31 */
962 	I915_WRITE(regs[id].ctl,
963 		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
964 
965 	if (intel_wait_for_register(&dev_priv->uncore,
966 				    DPLL_STATUS,
967 				    DPLL_LOCK(id),
968 				    DPLL_LOCK(id),
969 				    5))
970 		DRM_ERROR("DPLL %d not locked\n", id);
971 }
972 
973 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
974 				 struct intel_shared_dpll *pll)
975 {
976 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
977 }
978 
979 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
980 				struct intel_shared_dpll *pll)
981 {
982 	const struct skl_dpll_regs *regs = skl_dpll_regs;
983 	const enum intel_dpll_id id = pll->info->id;
984 
985 	/* the enable bit is always bit 31 */
986 	I915_WRITE(regs[id].ctl,
987 		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
988 	POSTING_READ(regs[id].ctl);
989 }
990 
991 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
992 				  struct intel_shared_dpll *pll)
993 {
994 }
995 
996 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
997 				     struct intel_shared_dpll *pll,
998 				     struct intel_dpll_hw_state *hw_state)
999 {
1000 	u32 val;
1001 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1002 	const enum intel_dpll_id id = pll->info->id;
1003 	intel_wakeref_t wakeref;
1004 	bool ret;
1005 
1006 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1007 						     POWER_DOMAIN_DISPLAY_CORE);
1008 	if (!wakeref)
1009 		return false;
1010 
1011 	ret = false;
1012 
1013 	val = I915_READ(regs[id].ctl);
1014 	if (!(val & LCPLL_PLL_ENABLE))
1015 		goto out;
1016 
1017 	val = I915_READ(DPLL_CTRL1);
1018 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1019 
1020 	/* avoid reading back stale values if HDMI mode is not enabled */
1021 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1022 		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1023 		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1024 	}
1025 	ret = true;
1026 
1027 out:
1028 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1029 
1030 	return ret;
1031 }
1032 
1033 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1034 				       struct intel_shared_dpll *pll,
1035 				       struct intel_dpll_hw_state *hw_state)
1036 {
1037 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1038 	const enum intel_dpll_id id = pll->info->id;
1039 	intel_wakeref_t wakeref;
1040 	u32 val;
1041 	bool ret;
1042 
1043 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1044 						     POWER_DOMAIN_DISPLAY_CORE);
1045 	if (!wakeref)
1046 		return false;
1047 
1048 	ret = false;
1049 
1050 	/* DPLL0 is always enabled since it drives CDCLK */
1051 	val = I915_READ(regs[id].ctl);
1052 	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1053 		goto out;
1054 
1055 	val = I915_READ(DPLL_CTRL1);
1056 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1057 
1058 	ret = true;
1059 
1060 out:
1061 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1062 
1063 	return ret;
1064 }
1065 
1066 struct skl_wrpll_context {
1067 	u64 min_deviation;		/* current minimal deviation */
1068 	u64 central_freq;		/* chosen central freq */
1069 	u64 dco_freq;			/* chosen dco freq */
1070 	unsigned int p;			/* chosen divider */
1071 };
1072 
1073 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1074 {
1075 	memset(ctx, 0, sizeof(*ctx));
1076 
1077 	ctx->min_deviation = U64_MAX;
1078 }
1079 
/* DCO freq must be within +1%/-6% of the DCO central freq */
1081 #define SKL_DCO_MAX_PDEVIATION	100
1082 #define SKL_DCO_MAX_NDEVIATION	600
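/*
 * skl_wrpll_try_divider() computes the deviation in units of 0.01%
 * (scaled by 10000), so the limits above correspond to +1% and -6%.
 */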
1083 
1084 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1085 				  u64 central_freq,
1086 				  u64 dco_freq,
1087 				  unsigned int divider)
1088 {
1089 	u64 deviation;
1090 
1091 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1092 			      central_freq);
1093 
1094 	/* positive deviation */
1095 	if (dco_freq >= central_freq) {
1096 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1097 		    deviation < ctx->min_deviation) {
1098 			ctx->min_deviation = deviation;
1099 			ctx->central_freq = central_freq;
1100 			ctx->dco_freq = dco_freq;
1101 			ctx->p = divider;
1102 		}
1103 	/* negative deviation */
1104 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1105 		   deviation < ctx->min_deviation) {
1106 		ctx->min_deviation = deviation;
1107 		ctx->central_freq = central_freq;
1108 		ctx->dco_freq = dco_freq;
1109 		ctx->p = divider;
1110 	}
1111 }
1112 
1113 static void skl_wrpll_get_multipliers(unsigned int p,
1114 				      unsigned int *p0 /* out */,
1115 				      unsigned int *p1 /* out */,
1116 				      unsigned int *p2 /* out */)
1117 {
1118 	/* even dividers */
1119 	if (p % 2 == 0) {
1120 		unsigned int half = p / 2;
1121 
1122 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1123 			*p0 = 2;
1124 			*p1 = 1;
1125 			*p2 = half;
1126 		} else if (half % 2 == 0) {
1127 			*p0 = 2;
1128 			*p1 = half / 2;
1129 			*p2 = 2;
1130 		} else if (half % 3 == 0) {
1131 			*p0 = 3;
1132 			*p1 = half / 3;
1133 			*p2 = 2;
1134 		} else if (half % 7 == 0) {
1135 			*p0 = 7;
1136 			*p1 = half / 7;
1137 			*p2 = 2;
1138 		}
1139 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1140 		*p0 = 3;
1141 		*p1 = 1;
1142 		*p2 = p / 3;
1143 	} else if (p == 5 || p == 7) {
1144 		*p0 = p;
1145 		*p1 = 1;
1146 		*p2 = 1;
1147 	} else if (p == 15) {
1148 		*p0 = 3;
1149 		*p1 = 1;
1150 		*p2 = 5;
1151 	} else if (p == 21) {
1152 		*p0 = 7;
1153 		*p1 = 1;
1154 		*p2 = 3;
1155 	} else if (p == 35) {
1156 		*p0 = 7;
1157 		*p1 = 1;
1158 		*p2 = 5;
1159 	}
1160 }
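
/*
 * For p values not covered above the outputs are left untouched. The
 * divider lists used by skl_ddi_calculate_wrpll() only contain values
 * handled here, but callers still pre-initialize p0/p1/p2 to be safe.
 */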
1161 
1162 struct skl_wrpll_params {
1163 	u32 dco_fraction;
1164 	u32 dco_integer;
1165 	u32 qdiv_ratio;
1166 	u32 qdiv_mode;
1167 	u32 kdiv;
1168 	u32 pdiv;
1169 	u32 central_freq;
1170 };
1171 
1172 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1173 				      u64 afe_clock,
1174 				      u64 central_freq,
1175 				      u32 p0, u32 p1, u32 p2)
1176 {
1177 	u64 dco_freq;
1178 
1179 	switch (central_freq) {
1180 	case 9600000000ULL:
1181 		params->central_freq = 0;
1182 		break;
1183 	case 9000000000ULL:
1184 		params->central_freq = 1;
1185 		break;
1186 	case 8400000000ULL:
1187 		params->central_freq = 3;
1188 	}
1189 
1190 	switch (p0) {
1191 	case 1:
1192 		params->pdiv = 0;
1193 		break;
1194 	case 2:
1195 		params->pdiv = 1;
1196 		break;
1197 	case 3:
1198 		params->pdiv = 2;
1199 		break;
1200 	case 7:
1201 		params->pdiv = 4;
1202 		break;
1203 	default:
1204 		WARN(1, "Incorrect PDiv\n");
1205 	}
1206 
1207 	switch (p2) {
1208 	case 5:
1209 		params->kdiv = 0;
1210 		break;
1211 	case 2:
1212 		params->kdiv = 1;
1213 		break;
1214 	case 3:
1215 		params->kdiv = 2;
1216 		break;
1217 	case 1:
1218 		params->kdiv = 3;
1219 		break;
1220 	default:
1221 		WARN(1, "Incorrect KDiv\n");
1222 	}
1223 
1224 	params->qdiv_ratio = p1;
1225 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1226 
1227 	dco_freq = p0 * p1 * p2 * afe_clock;
1228 
	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match the bspec: dco_integer is dco_freq in units
	 * of 24 MHz, and dco_fraction is the fractional remainder of that
	 * division scaled by 2^15 (0x8000).
	 */
1233 	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1234 	params->dco_fraction =
1235 		div_u64((div_u64(dco_freq, 24) -
1236 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1237 }
1238 
1239 static bool
1240 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1241 			struct skl_wrpll_params *wrpll_params)
1242 {
	u64 afe_clock = (u64)clock * 5; /* AFE Clock is 5x Pixel clock */
1244 	u64 dco_central_freq[3] = { 8400000000ULL,
1245 				    9000000000ULL,
1246 				    9600000000ULL };
1247 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1248 					     24, 28, 30, 32, 36, 40, 42, 44,
1249 					     48, 52, 54, 56, 60, 64, 66, 68,
1250 					     70, 72, 76, 78, 80, 84, 88, 90,
1251 					     92, 96, 98 };
1252 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1253 	static const struct {
1254 		const int *list;
1255 		int n_dividers;
1256 	} dividers[] = {
1257 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1258 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1259 	};
1260 	struct skl_wrpll_context ctx;
1261 	unsigned int dco, d, i;
1262 	unsigned int p0, p1, p2;
1263 
1264 	skl_wrpll_context_init(&ctx);
1265 
1266 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1267 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1268 			for (i = 0; i < dividers[d].n_dividers; i++) {
1269 				unsigned int p = dividers[d].list[i];
1270 				u64 dco_freq = p * afe_clock;
1271 
1272 				skl_wrpll_try_divider(&ctx,
1273 						      dco_central_freq[dco],
1274 						      dco_freq,
1275 						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider: we can't
				 * improve on a 0 deviation.
				 */
1281 				if (ctx.min_deviation == 0)
1282 					goto skip_remaining_dividers;
1283 			}
1284 		}
1285 
1286 skip_remaining_dividers:
1287 		/*
1288 		 * If a solution is found with an even divider, prefer
1289 		 * this one.
1290 		 */
1291 		if (d == 0 && ctx.p)
1292 			break;
1293 	}
1294 
1295 	if (!ctx.p) {
1296 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1297 		return false;
1298 	}
1299 
1300 	/*
1301 	 * gcc incorrectly analyses that these can be used without being
1302 	 * initialized. To be fair, it's hard to guess.
1303 	 */
1304 	p0 = p1 = p2 = 0;
1305 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1306 	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1307 				  p0, p1, p2);
1308 
1309 	return true;
1310 }
1311 
1312 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1313 {
1314 	u32 ctrl1, cfgcr1, cfgcr2;
1315 	struct skl_wrpll_params wrpll_params = { 0, };
1316 
1317 	/*
1318 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1319 	 * as the DPLL id in this function.
1320 	 */
1321 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1322 
1323 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1324 
1325 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1326 				     &wrpll_params))
1327 		return false;
1328 
1329 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1330 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1331 		wrpll_params.dco_integer;
1332 
1333 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1334 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1335 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1336 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1337 		wrpll_params.central_freq;
1338 
1339 	memset(&crtc_state->dpll_hw_state, 0,
1340 	       sizeof(crtc_state->dpll_hw_state));
1341 
1342 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1343 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1344 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1345 	return true;
1346 }
1347 
1348 static bool
1349 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1350 {
1351 	u32 ctrl1;
1352 
1353 	/*
1354 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1355 	 * as the DPLL id in this function.
1356 	 */
1357 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1358 	switch (crtc_state->port_clock / 2) {
1359 	case 81000:
1360 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1361 		break;
1362 	case 135000:
1363 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1364 		break;
1365 	case 270000:
1366 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1367 		break;
1368 		/* eDP 1.4 rates */
1369 	case 162000:
1370 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1371 		break;
1372 	case 108000:
1373 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1374 		break;
1375 	case 216000:
1376 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1377 		break;
1378 	}
1379 
1380 	memset(&crtc_state->dpll_hw_state, 0,
1381 	       sizeof(crtc_state->dpll_hw_state));
1382 
1383 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1384 
1385 	return true;
1386 }
1387 
1388 static struct intel_shared_dpll *
1389 skl_get_dpll(struct intel_crtc_state *crtc_state,
1390 	     struct intel_encoder *encoder)
1391 {
1392 	struct intel_shared_dpll *pll;
1393 	bool bret;
1394 
1395 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1396 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1397 		if (!bret) {
1398 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1399 			return NULL;
1400 		}
1401 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1402 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1403 		if (!bret) {
1404 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1405 			return NULL;
1406 		}
1407 	} else {
1408 		return NULL;
1409 	}
1410 
1411 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1412 		pll = intel_find_shared_dpll(crtc_state,
1413 					     DPLL_ID_SKL_DPLL0,
1414 					     DPLL_ID_SKL_DPLL0);
1415 	else
1416 		pll = intel_find_shared_dpll(crtc_state,
1417 					     DPLL_ID_SKL_DPLL1,
1418 					     DPLL_ID_SKL_DPLL3);
1419 	if (!pll)
1420 		return NULL;
1421 
1422 	intel_reference_shared_dpll(pll, crtc_state);
1423 
1424 	return pll;
1425 }
1426 
1427 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1428 			      const struct intel_dpll_hw_state *hw_state)
1429 {
1430 	DRM_DEBUG_KMS("dpll_hw_state: "
1431 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1432 		      hw_state->ctrl1,
1433 		      hw_state->cfgcr1,
1434 		      hw_state->cfgcr2);
1435 }
1436 
1437 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1438 	.enable = skl_ddi_pll_enable,
1439 	.disable = skl_ddi_pll_disable,
1440 	.get_hw_state = skl_ddi_pll_get_hw_state,
1441 };
1442 
1443 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1444 	.enable = skl_ddi_dpll0_enable,
1445 	.disable = skl_ddi_dpll0_disable,
1446 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1447 };
1448 
1449 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1450 				struct intel_shared_dpll *pll)
1451 {
1452 	u32 temp;
1453 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1454 	enum dpio_phy phy;
1455 	enum dpio_channel ch;
1456 
1457 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1458 
1459 	/* Non-SSC reference */
1460 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1461 	temp |= PORT_PLL_REF_SEL;
1462 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1463 
1464 	if (IS_GEMINILAKE(dev_priv)) {
1465 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1466 		temp |= PORT_PLL_POWER_ENABLE;
1467 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1468 
1469 		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1470 				 PORT_PLL_POWER_STATE), 200))
1471 			DRM_ERROR("Power state not set for PLL:%d\n", port);
1472 	}
1473 
1474 	/* Disable 10 bit clock */
1475 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1476 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1477 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1478 
1479 	/* Write P1 & P2 */
1480 	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1481 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1482 	temp |= pll->state.hw_state.ebb0;
1483 	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1484 
1485 	/* Write M2 integer */
1486 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1487 	temp &= ~PORT_PLL_M2_MASK;
1488 	temp |= pll->state.hw_state.pll0;
1489 	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1490 
1491 	/* Write N */
1492 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1493 	temp &= ~PORT_PLL_N_MASK;
1494 	temp |= pll->state.hw_state.pll1;
1495 	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1496 
1497 	/* Write M2 fraction */
1498 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1499 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1500 	temp |= pll->state.hw_state.pll2;
1501 	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1502 
1503 	/* Write M2 fraction enable */
1504 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1505 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1506 	temp |= pll->state.hw_state.pll3;
1507 	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1508 
1509 	/* Write coeff */
1510 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1511 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1512 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1513 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1514 	temp |= pll->state.hw_state.pll6;
1515 	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1516 
1517 	/* Write calibration val */
1518 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1519 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1520 	temp |= pll->state.hw_state.pll8;
1521 	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1522 
1523 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1524 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1525 	temp |= pll->state.hw_state.pll9;
1526 	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1527 
1528 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1529 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1530 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1531 	temp |= pll->state.hw_state.pll10;
1532 	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1533 
1534 	/* Recalibrate with new settings */
1535 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1536 	temp |= PORT_PLL_RECALIBRATE;
1537 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1538 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1539 	temp |= pll->state.hw_state.ebb4;
1540 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1541 
1542 	/* Enable PLL */
1543 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1544 	temp |= PORT_PLL_ENABLE;
1545 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1546 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1547 
1548 	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1549 			200))
1550 		DRM_ERROR("PLL %d not locked\n", port);
1551 
1552 	if (IS_GEMINILAKE(dev_priv)) {
1553 		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1554 		temp |= DCC_DELAY_RANGE_2;
1555 		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1556 	}
1557 
	/*
	 * While we write to the group register to program all lanes at once,
	 * we can only read back individual lane registers; we pick lanes 0/1
	 * for that.
	 */
1562 	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1563 	temp &= ~LANE_STAGGER_MASK;
1564 	temp &= ~LANESTAGGER_STRAP_OVRD;
1565 	temp |= pll->state.hw_state.pcsdw12;
1566 	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1567 }
1568 
1569 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1570 					struct intel_shared_dpll *pll)
1571 {
1572 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1573 	u32 temp;
1574 
1575 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1576 	temp &= ~PORT_PLL_ENABLE;
1577 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1578 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1579 
1580 	if (IS_GEMINILAKE(dev_priv)) {
1581 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1582 		temp &= ~PORT_PLL_POWER_ENABLE;
1583 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1584 
1585 		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1586 				PORT_PLL_POWER_STATE), 200))
1587 			DRM_ERROR("Power state not reset for PLL:%d\n", port);
1588 	}
1589 }
1590 
1591 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1592 					struct intel_shared_dpll *pll,
1593 					struct intel_dpll_hw_state *hw_state)
1594 {
1595 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1596 	intel_wakeref_t wakeref;
1597 	enum dpio_phy phy;
1598 	enum dpio_channel ch;
1599 	u32 val;
1600 	bool ret;
1601 
1602 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1603 
1604 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1605 						     POWER_DOMAIN_DISPLAY_CORE);
1606 	if (!wakeref)
1607 		return false;
1608 
1609 	ret = false;
1610 
1611 	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1612 	if (!(val & PORT_PLL_ENABLE))
1613 		goto out;
1614 
1615 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1616 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1617 
1618 	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1619 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1620 
1621 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1622 	hw_state->pll0 &= PORT_PLL_M2_MASK;
1623 
1624 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1625 	hw_state->pll1 &= PORT_PLL_N_MASK;
1626 
1627 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1628 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1629 
1630 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1631 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1632 
1633 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1634 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1635 			  PORT_PLL_INT_COEFF_MASK |
1636 			  PORT_PLL_GAIN_CTL_MASK;
1637 
1638 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1639 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1640 
1641 	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1642 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1643 
1644 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1645 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1646 			   PORT_PLL_DCO_AMP_MASK;
1647 
	/*
	 * While we write to the group register to program all lanes at once,
	 * we can only read back individual lane registers. We configure all
	 * lanes the same way, so just read out lanes 0/1 here and print a
	 * note if lanes 2/3 differ.
	 */
1653 	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1654 	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1655 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1656 				 hw_state->pcsdw12,
1657 				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1658 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1659 
1660 	ret = true;
1661 
1662 out:
1663 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1664 
1665 	return ret;
1666 }
1667 
1668 /* bxt clock parameters */
1669 struct bxt_clk_div {
1670 	int clock;
1671 	u32 p1;
1672 	u32 p2;
1673 	u32 m2_int;
1674 	u32 m2_frac;
1675 	bool m2_frac_en;
1676 	u32 n;
1677 
1678 	int vco;
1679 };
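/*
 * In the struct above, clock and vco are in kHz, and m2_frac holds the
 * fractional part of M2 in 1/2^22 units (see bxt_ddi_hdmi_pll_dividers()).
 */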
1680 
1681 /* pre-calculated values for DP linkrates */
/* pre-calculated values for DP link rates */
1683 	{162000, 4, 2, 32, 1677722, 1, 1},
1684 	{270000, 4, 1, 27,       0, 0, 1},
1685 	{540000, 2, 1, 27,       0, 0, 1},
1686 	{216000, 3, 2, 32, 1677722, 1, 1},
1687 	{243000, 4, 1, 24, 1258291, 1, 1},
1688 	{324000, 4, 1, 32, 1677722, 1, 1},
1689 	{432000, 3, 1, 32, 1677722, 1, 1}
1690 };
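
/*
 * Sanity check of the first entry above, assuming the 100 MHz non-SSC
 * reference and the m1 == 2 convention used elsewhere in this file:
 * a 162000 kHz link rate gives vco = 162000 * 5 * 4 * 2 = 6480000 kHz,
 * and 100000 * 2 * (32 + 1677722/2^22) / 1 is ~6480000 kHz as well.
 */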
1691 
1692 static bool
1693 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1694 			  struct bxt_clk_div *clk_div)
1695 {
1696 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1697 	struct dpll best_clock;
1698 
	/*
	 * Calculate the HDMI dividers.
	 *
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
1704 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1705 		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1706 				 crtc_state->port_clock,
1707 				 pipe_name(crtc->pipe));
1708 		return false;
1709 	}
1710 
1711 	clk_div->p1 = best_clock.p1;
1712 	clk_div->p2 = best_clock.p2;
1713 	WARN_ON(best_clock.m1 != 2);
1714 	clk_div->n = best_clock.n;
1715 	clk_div->m2_int = best_clock.m2 >> 22;
1716 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1717 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1718 
1719 	clk_div->vco = best_clock.vco;
1720 
1721 	return true;
1722 }
1723 
1724 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1725 				    struct bxt_clk_div *clk_div)
1726 {
1727 	int clock = crtc_state->port_clock;
1728 	int i;
1729 
1730 	*clk_div = bxt_dp_clk_val[0];
1731 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1732 		if (bxt_dp_clk_val[i].clock == clock) {
1733 			*clk_div = bxt_dp_clk_val[i];
1734 			break;
1735 		}
1736 	}
1737 
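	/*
	 * VCO (in kHz) = link clock * 10 / 2, i.e. 5x the link rate (the AFE
	 * clock), multiplied by the post dividers P1 and P2.
	 */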
1738 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1739 }
1740 
1741 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1742 				      const struct bxt_clk_div *clk_div)
1743 {
1744 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1745 	int clock = crtc_state->port_clock;
1746 	int vco = clk_div->vco;
1747 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1748 	u32 lanestagger;
1749 
1750 	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
1751 
1752 	if (vco >= 6200000 && vco <= 6700000) {
1753 		prop_coef = 4;
1754 		int_coef = 9;
1755 		gain_ctl = 3;
1756 		targ_cnt = 8;
1757 	} else if ((vco > 5400000 && vco < 6200000) ||
1758 			(vco >= 4800000 && vco < 5400000)) {
1759 		prop_coef = 5;
1760 		int_coef = 11;
1761 		gain_ctl = 3;
1762 		targ_cnt = 9;
1763 	} else if (vco == 5400000) {
1764 		prop_coef = 3;
1765 		int_coef = 8;
1766 		gain_ctl = 1;
1767 		targ_cnt = 9;
1768 	} else {
1769 		DRM_ERROR("Invalid VCO\n");
1770 		return false;
1771 	}
1772 
1773 	if (clock > 270000)
1774 		lanestagger = 0x18;
1775 	else if (clock > 135000)
1776 		lanestagger = 0x0d;
1777 	else if (clock > 67000)
1778 		lanestagger = 0x07;
1779 	else if (clock > 33000)
1780 		lanestagger = 0x04;
1781 	else
1782 		lanestagger = 0x02;
1783 
1784 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1785 	dpll_hw_state->pll0 = clk_div->m2_int;
1786 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1787 	dpll_hw_state->pll2 = clk_div->m2_frac;
1788 
1789 	if (clk_div->m2_frac_en)
1790 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1791 
1792 	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1793 	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1794 
1795 	dpll_hw_state->pll8 = targ_cnt;
1796 
1797 	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1798 
1799 	dpll_hw_state->pll10 =
1800 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1801 		| PORT_PLL_DCO_AMP_OVR_EN_H;
1802 
1803 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1804 
1805 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1806 
1807 	return true;
1808 }
1809 
1810 static bool
1811 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1812 {
1813 	struct bxt_clk_div clk_div = {};
1814 
1815 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1816 
1817 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1818 }
1819 
1820 static bool
1821 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1822 {
1823 	struct bxt_clk_div clk_div = {};
1824 
1825 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1826 
1827 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1828 }
1829 
1830 static struct intel_shared_dpll *
1831 bxt_get_dpll(struct intel_crtc_state *crtc_state,
1832 	     struct intel_encoder *encoder)
1833 {
1834 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1835 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1836 	struct intel_shared_dpll *pll;
1837 	enum intel_dpll_id id;
1838 
1839 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1840 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1841 		return NULL;
1842 
1843 	if (intel_crtc_has_dp_encoder(crtc_state) &&
1844 	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1845 		return NULL;
1846 
1847 	/* 1:1 mapping between ports and PLLs */
1848 	id = (enum intel_dpll_id) encoder->port;
1849 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
1850 
1851 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1852 		      crtc->base.base.id, crtc->base.name, pll->info->name);
1853 
1854 	intel_reference_shared_dpll(pll, crtc_state);
1855 
1856 	return pll;
1857 }
1858 
1859 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1860 			      const struct intel_dpll_hw_state *hw_state)
1861 {
	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
1863 		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1864 		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1865 		      hw_state->ebb0,
1866 		      hw_state->ebb4,
1867 		      hw_state->pll0,
1868 		      hw_state->pll1,
1869 		      hw_state->pll2,
1870 		      hw_state->pll3,
1871 		      hw_state->pll6,
1872 		      hw_state->pll8,
1873 		      hw_state->pll9,
1874 		      hw_state->pll10,
1875 		      hw_state->pcsdw12);
1876 }
1877 
1878 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1879 	.enable = bxt_ddi_pll_enable,
1880 	.disable = bxt_ddi_pll_disable,
1881 	.get_hw_state = bxt_ddi_pll_get_hw_state,
1882 };
1883 
1884 struct intel_dpll_mgr {
1885 	const struct dpll_info *dpll_info;
1886 
1887 	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
1888 					      struct intel_encoder *encoder);
1889 
1890 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1891 			      const struct intel_dpll_hw_state *hw_state);
1892 };
1893 
1894 static const struct dpll_info pch_plls[] = {
1895 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1896 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1897 	{ },
1898 };
1899 
1900 static const struct intel_dpll_mgr pch_pll_mgr = {
1901 	.dpll_info = pch_plls,
1902 	.get_dpll = ibx_get_dpll,
1903 	.dump_hw_state = ibx_dump_hw_state,
1904 };
1905 
1906 static const struct dpll_info hsw_plls[] = {
1907 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1908 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1909 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1910 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1911 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1912 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1913 	{ },
1914 };
1915 
1916 static const struct intel_dpll_mgr hsw_pll_mgr = {
1917 	.dpll_info = hsw_plls,
1918 	.get_dpll = hsw_get_dpll,
1919 	.dump_hw_state = hsw_dump_hw_state,
1920 };
1921 
1922 static const struct dpll_info skl_plls[] = {
1923 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1924 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1925 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1926 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1927 	{ },
1928 };
1929 
1930 static const struct intel_dpll_mgr skl_pll_mgr = {
1931 	.dpll_info = skl_plls,
1932 	.get_dpll = skl_get_dpll,
1933 	.dump_hw_state = skl_dump_hw_state,
1934 };
1935 
1936 static const struct dpll_info bxt_plls[] = {
1937 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
1938 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1939 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1940 	{ },
1941 };
1942 
1943 static const struct intel_dpll_mgr bxt_pll_mgr = {
1944 	.dpll_info = bxt_plls,
1945 	.get_dpll = bxt_get_dpll,
1946 	.dump_hw_state = bxt_dump_hw_state,
1947 };
1948 
1949 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1950 			       struct intel_shared_dpll *pll)
1951 {
1952 	const enum intel_dpll_id id = pll->info->id;
1953 	u32 val;
1954 
1955 	/* 1. Enable DPLL power in DPLL_ENABLE. */
1956 	val = I915_READ(CNL_DPLL_ENABLE(id));
1957 	val |= PLL_POWER_ENABLE;
1958 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
1959 
1960 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
1961 	if (intel_wait_for_register(&dev_priv->uncore,
1962 				    CNL_DPLL_ENABLE(id),
1963 				    PLL_POWER_STATE,
1964 				    PLL_POWER_STATE,
1965 				    5))
1966 		DRM_ERROR("PLL %d Power not enabled\n", id);
1967 
1968 	/*
1969 	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
1970 	 * select DP mode, and set DP link rate.
1971 	 */
1972 	val = pll->state.hw_state.cfgcr0;
1973 	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
1974 
1975 	/* 4. Read back to ensure writes completed */
1976 	POSTING_READ(CNL_DPLL_CFGCR0(id));
1977 
1978 	/* 3. (cont.) Configure DPLL_CFGCR1 for HDMI mode. */
1979 	/* Avoid touching CFGCR1 if HDMI mode is not enabled. */
1980 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
1981 		val = pll->state.hw_state.cfgcr1;
1982 		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
1983 		/* 4. Read back to ensure writes completed */
1984 		POSTING_READ(CNL_DPLL_CFGCR1(id));
1985 	}
1986 
1987 	/*
1988 	 * 5. If the frequency will result in a change to the voltage
1989 	 * requirement, follow the Display Voltage Frequency Switching
1990 	 * Sequence Before Frequency Change
1991 	 *
1992 	 * Note: DVFS is actually handled via the cdclk code paths,
1993 	 * hence we do nothing here.
1994 	 */
1995 
1996 	/* 6. Enable DPLL in DPLL_ENABLE. */
1997 	val = I915_READ(CNL_DPLL_ENABLE(id));
1998 	val |= PLL_ENABLE;
1999 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2000 
2001 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2002 	if (intel_wait_for_register(&dev_priv->uncore,
2003 				    CNL_DPLL_ENABLE(id),
2004 				    PLL_LOCK,
2005 				    PLL_LOCK,
2006 				    5))
2007 		DRM_ERROR("PLL %d not locked\n", id);
2008 
2009 	/*
2010 	 * 8. If the frequency will result in a change to the voltage
2011 	 * requirement, follow the Display Voltage Frequency Switching
2012 	 * Sequence After Frequency Change
2013 	 *
2014 	 * Note: DVFS is actually handled via the cdclk code paths,
2015 	 * hence we do nothing here.
2016 	 */
2017 
2018 	/*
2019 	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2020 	 * Done at intel_ddi_clk_select
2021 	 */
2022 }
2023 
2024 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2025 				struct intel_shared_dpll *pll)
2026 {
2027 	const enum intel_dpll_id id = pll->info->id;
2028 	u32 val;
2029 
2030 	/*
2031 	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2032 	 * Done at intel_ddi_post_disable
2033 	 */
2034 
2035 	/*
2036 	 * 2. If the frequency will result in a change to the voltage
2037 	 * requirement, follow the Display Voltage Frequency Switching
2038 	 * Sequence Before Frequency Change
2039 	 *
2040 	 * Note: DVFS is actually handled via the cdclk code paths,
2041 	 * hence we do nothing here.
2042 	 */
2043 
2044 	/* 3. Disable DPLL through DPLL_ENABLE. */
2045 	val = I915_READ(CNL_DPLL_ENABLE(id));
2046 	val &= ~PLL_ENABLE;
2047 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2048 
2049 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2050 	if (intel_wait_for_register(&dev_priv->uncore,
2051 				    CNL_DPLL_ENABLE(id),
2052 				    PLL_LOCK,
2053 				    0,
2054 				    5))
2055 		DRM_ERROR("PLL %d still locked\n", id);
2056 
2057 	/*
2058 	 * 5. If the frequency will result in a change to the voltage
2059 	 * requirement, follow the Display Voltage Frequency Switching
2060 	 * Sequence After Frequency Change
2061 	 *
2062 	 * Note: DVFS is actually handled via the cdclk code paths,
2063 	 * hence we do nothing here.
2064 	 */
2065 
2066 	/* 6. Disable DPLL power in DPLL_ENABLE. */
2067 	val = I915_READ(CNL_DPLL_ENABLE(id));
2068 	val &= ~PLL_POWER_ENABLE;
2069 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2070 
2071 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2072 	if (intel_wait_for_register(&dev_priv->uncore,
2073 				    CNL_DPLL_ENABLE(id),
2074 				    PLL_POWER_STATE,
2075 				    0,
2076 				    5))
2077 		DRM_ERROR("PLL %d Power not disabled\n", id);
2078 }
2079 
2080 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2081 				     struct intel_shared_dpll *pll,
2082 				     struct intel_dpll_hw_state *hw_state)
2083 {
2084 	const enum intel_dpll_id id = pll->info->id;
2085 	intel_wakeref_t wakeref;
2086 	u32 val;
2087 	bool ret;
2088 
2089 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2090 						     POWER_DOMAIN_DISPLAY_CORE);
2091 	if (!wakeref)
2092 		return false;
2093 
2094 	ret = false;
2095 
2096 	val = I915_READ(CNL_DPLL_ENABLE(id));
2097 	if (!(val & PLL_ENABLE))
2098 		goto out;
2099 
2100 	val = I915_READ(CNL_DPLL_CFGCR0(id));
2101 	hw_state->cfgcr0 = val;
2102 
2103 	/* avoid reading back stale values if HDMI mode is not enabled */
2104 	if (val & DPLL_CFGCR0_HDMI_MODE)
2105 		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
2106 
2107 	ret = true;
2108 
2109 out:
2110 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2111 
2112 	return ret;
2113 }
2114 
2115 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2116 				      int *qdiv, int *kdiv)
2117 {
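	/*
	 * Split the overall divider into P * Q * K. For example, bestdiv = 20
	 * (even, divisible by 4) becomes pdiv = 2, qdiv = 5, kdiv = 2, while
	 * bestdiv = 15 (odd multiple of 3) becomes pdiv = 5, qdiv = 1,
	 * kdiv = 3.
	 */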
2118 	/* even dividers */
2119 	if (bestdiv % 2 == 0) {
2120 		if (bestdiv == 2) {
2121 			*pdiv = 2;
2122 			*qdiv = 1;
2123 			*kdiv = 1;
2124 		} else if (bestdiv % 4 == 0) {
2125 			*pdiv = 2;
2126 			*qdiv = bestdiv / 4;
2127 			*kdiv = 2;
2128 		} else if (bestdiv % 6 == 0) {
2129 			*pdiv = 3;
2130 			*qdiv = bestdiv / 6;
2131 			*kdiv = 2;
2132 		} else if (bestdiv % 5 == 0) {
2133 			*pdiv = 5;
2134 			*qdiv = bestdiv / 10;
2135 			*kdiv = 2;
2136 		} else if (bestdiv % 14 == 0) {
2137 			*pdiv = 7;
2138 			*qdiv = bestdiv / 14;
2139 			*kdiv = 2;
2140 		}
2141 	} else {
2142 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2143 			*pdiv = bestdiv;
2144 			*qdiv = 1;
2145 			*kdiv = 1;
2146 		} else { /* 9, 15, 21 */
2147 			*pdiv = bestdiv / 3;
2148 			*qdiv = 1;
2149 			*kdiv = 3;
2150 		}
2151 	}
2152 }
2153 
2154 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2155 				      u32 dco_freq, u32 ref_freq,
2156 				      int pdiv, int qdiv, int kdiv)
2157 {
2158 	u32 dco;
2159 
2160 	switch (kdiv) {
2161 	case 1:
2162 		params->kdiv = 1;
2163 		break;
2164 	case 2:
2165 		params->kdiv = 2;
2166 		break;
2167 	case 3:
2168 		params->kdiv = 4;
2169 		break;
2170 	default:
2171 		WARN(1, "Incorrect KDiv\n");
2172 	}
2173 
2174 	switch (pdiv) {
2175 	case 2:
2176 		params->pdiv = 1;
2177 		break;
2178 	case 3:
2179 		params->pdiv = 2;
2180 		break;
2181 	case 5:
2182 		params->pdiv = 4;
2183 		break;
2184 	case 7:
2185 		params->pdiv = 8;
2186 		break;
2187 	default:
2188 		WARN(1, "Incorrect PDiv\n");
2189 	}
2190 
2191 	WARN_ON(kdiv != 2 && qdiv != 1);
2192 
2193 	params->qdiv_ratio = qdiv;
2194 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2195 
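	/*
	 * dco_freq / ref_freq with a 15-bit binary fraction. E.g.
	 * dco_freq = 8100000 and ref_freq = 24000 give 337.5, i.e.
	 * dco_integer = 0x151 and dco_fraction = 0x4000 (0.5 * 2^15),
	 * matching the 8.1 GHz entry in the 24 MHz table later in this file.
	 */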
2196 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2197 
2198 	params->dco_integer = dco >> 15;
2199 	params->dco_fraction = dco & 0x7fff;
2200 }
2201 
2202 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2203 {
2204 	int ref_clock = dev_priv->cdclk.hw.ref;
2205 
2206 	/*
2207 	 * For ICL+, the spec states: if reference frequency is 38.4,
2208 	 * use 19.2 because the DPLL automatically divides that by 2.
2209 	 */
2210 	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2211 		ref_clock = 19200;
2212 
2213 	return ref_clock;
2214 }
2215 
2216 static bool
2217 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2218 			struct skl_wrpll_params *wrpll_params)
2219 {
2220 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2221 	u32 afe_clock = crtc_state->port_clock * 5;
2222 	u32 ref_clock;
2223 	u32 dco_min = 7998000;
2224 	u32 dco_max = 10000000;
2225 	u32 dco_mid = (dco_min + dco_max) / 2;
2226 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2227 					 18, 20, 24, 28, 30, 32,  36,  40,
2228 					 42, 44, 48, 50, 52, 54,  56,  60,
2229 					 64, 66, 68, 70, 72, 76,  78,  80,
2230 					 84, 88, 90, 92, 96, 98, 100, 102,
2231 					  3,  5,  7,  9, 15, 21 };
2232 	u32 dco, best_dco = 0, dco_centrality = 0;
2233 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2234 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2235 
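	/*
	 * Pick the divider whose DCO lands closest to the middle of the
	 * valid range. E.g. for a 594000 kHz HDMI port clock, afe_clock is
	 * 2970000 kHz and only divider 3 yields a DCO (8910000 kHz) inside
	 * [7998000, 10000000], so best_div = 3.
	 */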
2236 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2237 		dco = afe_clock * dividers[d];
2238 
2239 		if ((dco <= dco_max) && (dco >= dco_min)) {
2240 			dco_centrality = abs(dco - dco_mid);
2241 
2242 			if (dco_centrality < best_dco_centrality) {
2243 				best_dco_centrality = dco_centrality;
2244 				best_div = dividers[d];
2245 				best_dco = dco;
2246 			}
2247 		}
2248 	}
2249 
2250 	if (best_div == 0)
2251 		return false;
2252 
2253 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2254 
2255 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2256 
2257 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2258 				  pdiv, qdiv, kdiv);
2259 
2260 	return true;
2261 }
2262 
2263 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2264 {
2265 	u32 cfgcr0, cfgcr1;
2266 	struct skl_wrpll_params wrpll_params = { 0, };
2267 
2268 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2269 
2270 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2271 		return false;
2272 
2273 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2274 		wrpll_params.dco_integer;
2275 
2276 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2277 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2278 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2279 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2280 		DPLL_CFGCR1_CENTRAL_FREQ;
2281 
2282 	memset(&crtc_state->dpll_hw_state, 0,
2283 	       sizeof(crtc_state->dpll_hw_state));
2284 
2285 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2286 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2287 	return true;
2288 }
2289 
2290 static bool
2291 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2292 {
2293 	u32 cfgcr0;
2294 
2295 	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2296 
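	/*
	 * The CFGCR0 link rate field is keyed on half the port clock:
	 * e.g. an HBR2 link (port_clock = 540000) selects
	 * DPLL_CFGCR0_LINK_RATE_2700.
	 */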
2297 	switch (crtc_state->port_clock / 2) {
2298 	case 81000:
2299 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2300 		break;
2301 	case 135000:
2302 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2303 		break;
2304 	case 270000:
2305 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2306 		break;
2307 		/* eDP 1.4 rates */
2308 	case 162000:
2309 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2310 		break;
2311 	case 108000:
2312 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2313 		break;
2314 	case 216000:
2315 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2316 		break;
2317 	case 324000:
2318 		/* Some SKUs may require elevated I/O voltage to support this */
2319 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2320 		break;
2321 	case 405000:
2322 		/* Some SKUs may require elevated I/O voltage to support this */
2323 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2324 		break;
2325 	}
2326 
2327 	memset(&crtc_state->dpll_hw_state, 0,
2328 	       sizeof(crtc_state->dpll_hw_state));
2329 
2330 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2331 
2332 	return true;
2333 }
2334 
2335 static struct intel_shared_dpll *
2336 cnl_get_dpll(struct intel_crtc_state *crtc_state,
2337 	     struct intel_encoder *encoder)
2338 {
2339 	struct intel_shared_dpll *pll;
2340 	bool bret;
2341 
2342 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2343 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2344 		if (!bret) {
2345 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2346 			return NULL;
2347 		}
2348 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2349 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2350 		if (!bret) {
2351 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2352 			return NULL;
2353 		}
2354 	} else {
2355 		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2356 			      crtc_state->output_types);
2357 		return NULL;
2358 	}
2359 
2360 	pll = intel_find_shared_dpll(crtc_state,
2361 				     DPLL_ID_SKL_DPLL0,
2362 				     DPLL_ID_SKL_DPLL2);
2363 	if (!pll) {
2364 		DRM_DEBUG_KMS("No PLL selected\n");
2365 		return NULL;
2366 	}
2367 
2368 	intel_reference_shared_dpll(pll, crtc_state);
2369 
2370 	return pll;
2371 }
2372 
2373 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2374 			      const struct intel_dpll_hw_state *hw_state)
2375 {
2376 	DRM_DEBUG_KMS("dpll_hw_state: "
2377 		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2378 		      hw_state->cfgcr0,
2379 		      hw_state->cfgcr1);
2380 }
2381 
2382 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2383 	.enable = cnl_ddi_pll_enable,
2384 	.disable = cnl_ddi_pll_disable,
2385 	.get_hw_state = cnl_ddi_pll_get_hw_state,
2386 };
2387 
2388 static const struct dpll_info cnl_plls[] = {
2389 	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2390 	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2391 	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2392 	{ },
2393 };
2394 
2395 static const struct intel_dpll_mgr cnl_pll_mgr = {
2396 	.dpll_info = cnl_plls,
2397 	.get_dpll = cnl_get_dpll,
2398 	.dump_hw_state = cnl_dump_hw_state,
2399 };
2400 
2401 struct icl_combo_pll_params {
2402 	int clock;
2403 	struct skl_wrpll_params wrpll;
2404 };
2405 
2406 /*
2407  * These values are already adjusted: they're the bits we write to the
2408  * registers, not the logical values.
2409  */
2410 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2411 	{ 540000,
2412 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2413 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2414 	{ 270000,
2415 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2416 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2417 	{ 162000,
2418 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2419 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2420 	{ 324000,
2421 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2422 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2423 	{ 216000,
2424 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2425 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2426 	{ 432000,
2427 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2428 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2429 	{ 648000,
2430 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2431 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2432 	{ 810000,
2433 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2434 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2435 };
2436 
2438 /* Also used for 38.4 MHz values. */
2439 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2440 	{ 540000,
2441 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2442 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2443 	{ 270000,
2444 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2445 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2446 	{ 162000,
2447 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2448 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2449 	{ 324000,
2450 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2451 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2452 	{ 216000,
2453 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2454 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2455 	{ 432000,
2456 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2457 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2458 	{ 648000,
2459 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2460 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2461 	{ 810000,
2462 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2463 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2464 };
2465 
2466 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2467 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2468 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2469 };
2470 
2471 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2472 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2473 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2474 };
2475 
2476 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2477 				  struct skl_wrpll_params *pll_params)
2478 {
2479 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2480 	const struct icl_combo_pll_params *params =
2481 		dev_priv->cdclk.hw.ref == 24000 ?
2482 		icl_dp_combo_pll_24MHz_values :
2483 		icl_dp_combo_pll_19_2MHz_values;
2484 	int clock = crtc_state->port_clock;
2485 	int i;
2486 
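	/*
	 * Both tables list the same clocks in the same order, so iterating
	 * with the 24 MHz table's size is also valid for the 19.2/38.4 MHz
	 * table.
	 */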
2487 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2488 		if (clock == params[i].clock) {
2489 			*pll_params = params[i].wrpll;
2490 			return true;
2491 		}
2492 	}
2493 
2494 	MISSING_CASE(clock);
2495 	return false;
2496 }
2497 
2498 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2499 			     struct skl_wrpll_params *pll_params)
2500 {
2501 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2502 
2503 	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2504 			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
2505 	return true;
2506 }
2507 
2508 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2509 				struct intel_encoder *encoder)
2510 {
2511 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2512 	u32 cfgcr0, cfgcr1;
2513 	struct skl_wrpll_params pll_params = { 0 };
2514 	bool ret;
2515 
2516 	if (intel_port_is_tc(dev_priv, encoder->port))
2517 		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2518 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2519 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2520 		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2521 	else
2522 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2523 
2524 	if (!ret)
2525 		return false;
2526 
2527 	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2528 		 pll_params.dco_integer;
2529 
2530 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2531 		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2532 		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2533 		 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
2534 		 DPLL_CFGCR1_CENTRAL_FREQ_8400;
2535 
2536 	memset(&crtc_state->dpll_hw_state, 0,
2537 	       sizeof(crtc_state->dpll_hw_state));
2538 
2539 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2540 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2541 
2542 	return true;
2543 }
2544 
2546 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2547 {
2548 	return id - DPLL_ID_ICL_MGPLL1;
2549 }
2550 
2551 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2552 {
2553 	return tc_port + DPLL_ID_ICL_MGPLL1;
2554 }
2555 
2556 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2557 				     u32 *target_dco_khz,
2558 				     struct intel_dpll_hw_state *state)
2559 {
2560 	u32 dco_min_freq, dco_max_freq;
2561 	int div1_vals[] = {7, 5, 3, 2};
2562 	unsigned int i;
2563 	int div2;
2564 
2565 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2566 	dco_max_freq = is_dp ? 8100000 : 10000000;
2567 
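	/*
	 * DCO = div1 * div2 * clock * 5. E.g. for DP at 270000 kHz (HBR) the
	 * DCO must hit exactly 8100000 kHz, which the loop below finds with
	 * div1 = 3, div2 = 2 (3 * 2 * 270000 * 5 = 8100000).
	 */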
2568 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2569 		int div1 = div1_vals[i];
2570 
2571 		for (div2 = 10; div2 > 0; div2--) {
2572 			int dco = div1 * div2 * clock_khz * 5;
2573 			int a_divratio, tlinedrv, inputsel;
2574 			u32 hsdiv;
2575 
2576 			if (dco < dco_min_freq || dco > dco_max_freq)
2577 				continue;
2578 
2579 			if (div2 >= 2) {
2580 				a_divratio = is_dp ? 10 : 5;
2581 				tlinedrv = 2;
2582 			} else {
2583 				a_divratio = 5;
2584 				tlinedrv = 0;
2585 			}
2586 			inputsel = is_dp ? 0 : 1;
2587 
2588 			switch (div1) {
2589 			default:
2590 				MISSING_CASE(div1);
2591 				/* fall through */
2592 			case 2:
2593 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2594 				break;
2595 			case 3:
2596 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2597 				break;
2598 			case 5:
2599 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2600 				break;
2601 			case 7:
2602 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2603 				break;
2604 			}
2605 
2606 			*target_dco_khz = dco;
2607 
2608 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2609 
2610 			state->mg_clktop2_coreclkctl1 =
2611 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2612 
2613 			state->mg_clktop2_hsclkctl =
2614 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2615 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2616 				hsdiv |
2617 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2618 
2619 			return true;
2620 		}
2621 	}
2622 
2623 	return false;
2624 }
2625 
2626 /*
2627  * The specification for this function uses real numbers, so the math had to be
2628  * adapted to integer-only calculation; that's why it looks so different.
2629  */
2630 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
2631 {
2632 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2633 	struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
2634 	int refclk_khz = dev_priv->cdclk.hw.ref;
2635 	int clock = crtc_state->port_clock;
2636 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2637 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2638 	u32 prop_coeff, int_coeff;
2639 	u32 tdc_targetcnt, feedfwgain;
2640 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2641 	u64 tmp;
2642 	bool use_ssc = false;
2643 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2644 
2645 	memset(pll_state, 0, sizeof(*pll_state));
2646 
2647 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2648 				      pll_state)) {
2649 		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
2650 		return false;
2651 	}
2652 
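	/*
	 * Feedback divider: dco = refclk * m1div * (m2div_int +
	 * m2div_frac / 2^22). E.g. refclk_khz = 38400, dco_khz = 8100000
	 * and m1div = 2 give m2div_int = 105, m2div_rem = 36000 and
	 * m2div_frac = 1966080 (0.46875 * 2^22).
	 */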
2653 	m1div = 2;
2654 	m2div_int = dco_khz / (refclk_khz * m1div);
2655 	if (m2div_int > 255) {
2656 		m1div = 4;
2657 		m2div_int = dco_khz / (refclk_khz * m1div);
2658 		if (m2div_int > 255) {
2659 			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2660 				      clock);
2661 			return false;
2662 		}
2663 	}
2664 	m2div_rem = dco_khz % (refclk_khz * m1div);
2665 
2666 	tmp = (u64)m2div_rem * (1 << 22);
2667 	do_div(tmp, refclk_khz * m1div);
2668 	m2div_frac = tmp;
2669 
2670 	switch (refclk_khz) {
2671 	case 19200:
2672 		iref_ndiv = 1;
2673 		iref_trim = 28;
2674 		iref_pulse_w = 1;
2675 		break;
2676 	case 24000:
2677 		iref_ndiv = 1;
2678 		iref_trim = 25;
2679 		iref_pulse_w = 2;
2680 		break;
2681 	case 38400:
2682 		iref_ndiv = 2;
2683 		iref_trim = 28;
2684 		iref_pulse_w = 1;
2685 		break;
2686 	default:
2687 		MISSING_CASE(refclk_khz);
2688 		return false;
2689 	}
2690 
2691 	/*
2692 	 * tdc_res = 0.000003
2693 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2694 	 *
2695 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2696 	 * was supposed to be a division, but we rearranged the operations of
2697 	 * the formula to avoid early divisions so we don't multiply the
2698 	 * rounding errors.
2699 	 *
2700 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2701 	 * we also rearrange to work with integers.
2702 	 *
2703 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2704 	 * last division by 10.
2705 	 */
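	/*
	 * E.g. refclk_khz = 24000 gives (2000000000 / 3168000 + 5) / 10 = 63,
	 * the same result as the real-number formula above.
	 */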
2706 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2707 
2708 	/*
2709 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2710 	 * 32 bits. That's not a problem since we round the division down
2711 	 * anyway.
2712 	 */
2713 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2714 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2715 
2716 	if (dco_khz >= 9000000) {
2717 		prop_coeff = 5;
2718 		int_coeff = 10;
2719 	} else {
2720 		prop_coeff = 4;
2721 		int_coeff = 8;
2722 	}
2723 
2724 	if (use_ssc) {
2725 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2726 		do_div(tmp, refclk_khz * m1div * 10000);
2727 		ssc_stepsize = tmp;
2728 
2729 		tmp = mul_u32_u32(dco_khz, 1000);
2730 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2731 	} else {
2732 		ssc_stepsize = 0;
2733 		ssc_steplen = 0;
2734 	}
2735 	ssc_steplog = 4;
2736 
2737 	pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2738 				  MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2739 				  MG_PLL_DIV0_FBDIV_INT(m2div_int);
2740 
2741 	pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2742 				 MG_PLL_DIV1_DITHER_DIV_2 |
2743 				 MG_PLL_DIV1_NDIVRATIO(1) |
2744 				 MG_PLL_DIV1_FBPREDIV(m1div);
2745 
2746 	pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2747 			       MG_PLL_LF_AFCCNTSEL_512 |
2748 			       MG_PLL_LF_GAINCTRL(1) |
2749 			       MG_PLL_LF_INT_COEFF(int_coeff) |
2750 			       MG_PLL_LF_PROP_COEFF(prop_coeff);
2751 
2752 	pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2753 				      MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2754 				      MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2755 				      MG_PLL_FRAC_LOCK_DCODITHEREN |
2756 				      MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2757 	if (use_ssc || m2div_rem > 0)
2758 		pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2759 
2760 	pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2761 				MG_PLL_SSC_TYPE(2) |
2762 				MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2763 				MG_PLL_SSC_STEPNUM(ssc_steplog) |
2764 				MG_PLL_SSC_FLLEN |
2765 				MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2766 
2767 	pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2768 					    MG_PLL_TDC_COLDST_IREFINT_EN |
2769 					    MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2770 					    MG_PLL_TDC_TDCOVCCORR_EN |
2771 					    MG_PLL_TDC_TDCSEL(3);
2772 
2773 	pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2774 				 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2775 				 MG_PLL_BIAS_BIAS_BONUS(10) |
2776 				 MG_PLL_BIAS_BIASCAL_EN |
2777 				 MG_PLL_BIAS_CTRIM(12) |
2778 				 MG_PLL_BIAS_VREF_RDAC(4) |
2779 				 MG_PLL_BIAS_IREFTRIM(iref_trim);
2780 
2781 	if (refclk_khz == 38400) {
2782 		pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2783 		pll_state->mg_pll_bias_mask = 0;
2784 	} else {
2785 		pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2786 		pll_state->mg_pll_bias_mask = -1U;
2787 	}
2788 
2789 	pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2790 	pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2791 
2792 	return true;
2793 }
2794 
2795 static struct intel_shared_dpll *
2796 icl_get_dpll(struct intel_crtc_state *crtc_state,
2797 	     struct intel_encoder *encoder)
2798 {
2799 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2800 	struct intel_digital_port *intel_dig_port;
2801 	struct intel_shared_dpll *pll;
2802 	enum port port = encoder->port;
2803 	enum intel_dpll_id min, max;
2804 	bool ret;
2805 
2806 	if (intel_port_is_combophy(dev_priv, port)) {
2807 		min = DPLL_ID_ICL_DPLL0;
2808 		max = DPLL_ID_ICL_DPLL1;
2809 		ret = icl_calc_dpll_state(crtc_state, encoder);
2810 	} else if (intel_port_is_tc(dev_priv, port)) {
2811 		if (encoder->type == INTEL_OUTPUT_DP_MST) {
2812 			struct intel_dp_mst_encoder *mst_encoder;
2813 
2814 			mst_encoder = enc_to_mst(&encoder->base);
2815 			intel_dig_port = mst_encoder->primary;
2816 		} else {
2817 			intel_dig_port = enc_to_dig_port(&encoder->base);
2818 		}
2819 
2820 		if (intel_dig_port->tc_type == TC_PORT_TBT) {
2821 			min = DPLL_ID_ICL_TBTPLL;
2822 			max = min;
2823 			ret = icl_calc_dpll_state(crtc_state, encoder);
2824 		} else {
2825 			enum tc_port tc_port;
2826 
2827 			tc_port = intel_port_to_tc(dev_priv, port);
2828 			min = icl_tc_port_to_pll_id(tc_port);
2829 			max = min;
2830 			ret = icl_calc_mg_pll_state(crtc_state);
2831 		}
2832 	} else {
2833 		MISSING_CASE(port);
2834 		return NULL;
2835 	}
2836 
2837 	if (!ret) {
2838 		DRM_DEBUG_KMS("Could not calculate PLL state.\n");
2839 		return NULL;
2840 	}
2841 
2843 	pll = intel_find_shared_dpll(crtc_state, min, max);
2844 	if (!pll) {
2845 		DRM_DEBUG_KMS("No PLL selected\n");
2846 		return NULL;
2847 	}
2848 
2849 	intel_reference_shared_dpll(pll, crtc_state);
2850 
2851 	return pll;
2852 }
2853 
2854 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
2855 				struct intel_shared_dpll *pll,
2856 				struct intel_dpll_hw_state *hw_state)
2857 {
2858 	const enum intel_dpll_id id = pll->info->id;
2859 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
2860 	intel_wakeref_t wakeref;
2861 	bool ret = false;
2862 	u32 val;
2863 
2864 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2865 						     POWER_DOMAIN_DISPLAY_CORE);
2866 	if (!wakeref)
2867 		return false;
2868 
2869 	val = I915_READ(MG_PLL_ENABLE(tc_port));
2870 	if (!(val & PLL_ENABLE))
2871 		goto out;
2872 
2873 	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
2874 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
2875 
2876 	hw_state->mg_clktop2_coreclkctl1 =
2877 		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
2878 	hw_state->mg_clktop2_coreclkctl1 &=
2879 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
2880 
2881 	hw_state->mg_clktop2_hsclkctl =
2882 		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
2883 	hw_state->mg_clktop2_hsclkctl &=
2884 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
2885 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
2886 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
2887 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
2888 
2889 	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
2890 	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
2891 	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
2892 	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
2893 	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
2894 
2895 	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
2896 	hw_state->mg_pll_tdc_coldst_bias =
2897 		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
2898 
2899 	if (dev_priv->cdclk.hw.ref == 38400) {
2900 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2901 		hw_state->mg_pll_bias_mask = 0;
2902 	} else {
2903 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
2904 		hw_state->mg_pll_bias_mask = -1U;
2905 	}
2906 
2907 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
2908 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
2909 
2910 	ret = true;
2911 out:
2912 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2913 	return ret;
2914 }
2915 
2916 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
2917 				 struct intel_shared_dpll *pll,
2918 				 struct intel_dpll_hw_state *hw_state,
2919 				 i915_reg_t enable_reg)
2920 {
2921 	const enum intel_dpll_id id = pll->info->id;
2922 	intel_wakeref_t wakeref;
2923 	bool ret = false;
2924 	u32 val;
2925 
2926 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2927 						     POWER_DOMAIN_DISPLAY_CORE);
2928 	if (!wakeref)
2929 		return false;
2930 
2931 	val = I915_READ(enable_reg);
2932 	if (!(val & PLL_ENABLE))
2933 		goto out;
2934 
2935 	hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
2936 	hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
2937 
2938 	ret = true;
2939 out:
2940 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2941 	return ret;
2942 }
2943 
2944 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
2945 				   struct intel_shared_dpll *pll,
2946 				   struct intel_dpll_hw_state *hw_state)
2947 {
2948 	return icl_pll_get_hw_state(dev_priv, pll, hw_state,
2949 				    CNL_DPLL_ENABLE(pll->info->id));
2950 }
2951 
2952 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
2953 				 struct intel_shared_dpll *pll,
2954 				 struct intel_dpll_hw_state *hw_state)
2955 {
2956 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
2957 }
2958 
2959 static void icl_dpll_write(struct drm_i915_private *dev_priv,
2960 			   struct intel_shared_dpll *pll)
2961 {
2962 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
2963 	const enum intel_dpll_id id = pll->info->id;
2964 
2965 	I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
2966 	I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
2967 	POSTING_READ(ICL_DPLL_CFGCR1(id));
2968 }
2969 
2970 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
2971 			     struct intel_shared_dpll *pll)
2972 {
2973 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
2974 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
2975 	u32 val;
2976 
2977 	/*
2978 	 * Some of the following registers have reserved fields, so program
2979 	 * these with RMW based on a mask. The mask can be fixed or generated
2980 	 * during the calc/readout phase if the mask depends on some other HW
2981 	 * state like refclk, see icl_calc_mg_pll_state().
2982 	 */
2983 	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
2984 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
2985 	val |= hw_state->mg_refclkin_ctl;
2986 	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
2987 
2988 	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
2989 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
2990 	val |= hw_state->mg_clktop2_coreclkctl1;
2991 	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
2992 
2993 	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
2994 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
2995 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
2996 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
2997 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
2998 	val |= hw_state->mg_clktop2_hsclkctl;
2999 	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
3000 
3001 	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3002 	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3003 	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3004 	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3005 	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3006 
3007 	val = I915_READ(MG_PLL_BIAS(tc_port));
3008 	val &= ~hw_state->mg_pll_bias_mask;
3009 	val |= hw_state->mg_pll_bias;
3010 	I915_WRITE(MG_PLL_BIAS(tc_port), val);
3011 
3012 	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3013 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3014 	val |= hw_state->mg_pll_tdc_coldst_bias;
3015 	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3016 
3017 	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3018 }
3019 
3020 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3021 				 struct intel_shared_dpll *pll,
3022 				 i915_reg_t enable_reg)
3023 {
3024 	u32 val;
3025 
3026 	val = I915_READ(enable_reg);
3027 	val |= PLL_POWER_ENABLE;
3028 	I915_WRITE(enable_reg, val);
3029 
3030 	/*
3031 	 * The spec says we need to "wait" but it also says it should be
3032 	 * immediate.
3033 	 */
3034 	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
3035 				    PLL_POWER_STATE, PLL_POWER_STATE, 1))
3036 		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3037 }
3038 
3039 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3040 			   struct intel_shared_dpll *pll,
3041 			   i915_reg_t enable_reg)
3042 {
3043 	u32 val;
3044 
3045 	val = I915_READ(enable_reg);
3046 	val |= PLL_ENABLE;
3047 	I915_WRITE(enable_reg, val);
3048 
3049 	/* Timeout is actually 600us. */
3050 	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
3051 				    PLL_LOCK, PLL_LOCK, 1))
3052 		DRM_ERROR("PLL %d not locked\n", pll->info->id);
3053 }
3054 
3055 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3056 			     struct intel_shared_dpll *pll)
3057 {
3058 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3059 
3060 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3061 
3062 	icl_dpll_write(dev_priv, pll);
3063 
3064 	/*
3065 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3066 	 * paths should already be setting the appropriate voltage, hence we do
3067 	 * nothing here.
3068 	 */
3069 
3070 	icl_pll_enable(dev_priv, pll, enable_reg);
3071 
3072 	/* DVFS post sequence would be here. See the comment above. */
3073 }
3074 
3075 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3076 			   struct intel_shared_dpll *pll)
3077 {
3078 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3079 
3080 	icl_dpll_write(dev_priv, pll);
3081 
3082 	/*
3083 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3084 	 * paths should already be setting the appropriate voltage, hence we do
3085 	 * nothing here.
3086 	 */
3087 
3088 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3089 
3090 	/* DVFS post sequence would be here. See the comment above. */
3091 }
3092 
3093 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3094 			  struct intel_shared_dpll *pll)
3095 {
3096 	i915_reg_t enable_reg =
3097 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3098 
3099 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3100 
3101 	icl_mg_pll_write(dev_priv, pll);
3102 
3103 	/*
3104 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3105 	 * paths should already be setting the appropriate voltage, hence we do
3106 	 * nothing here.
3107 	 */
3108 
3109 	icl_pll_enable(dev_priv, pll, enable_reg);
3110 
3111 	/* DVFS post sequence would be here. See the comment above. */
3112 }
3113 
3114 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3115 			    struct intel_shared_dpll *pll,
3116 			    i915_reg_t enable_reg)
3117 {
3118 	u32 val;
3119 
3120 	/* The first steps are done by intel_ddi_post_disable(). */
3121 
3122 	/*
3123 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3124 	 * paths should already be setting the appropriate voltage, hence we do
3125 	 * nothing here.
3126 	 */
3127 
3128 	val = I915_READ(enable_reg);
3129 	val &= ~PLL_ENABLE;
3130 	I915_WRITE(enable_reg, val);
3131 
3132 	/* Timeout is actually 1us. */
3133 	if (intel_wait_for_register(&dev_priv->uncore,
3134 				    enable_reg, PLL_LOCK, 0, 1))
3135 		DRM_ERROR("PLL %d still locked\n", pll->info->id);
3136 
3137 	/* DVFS post sequence would be here. See the comment above. */
3138 
3139 	val = I915_READ(enable_reg);
3140 	val &= ~PLL_POWER_ENABLE;
3141 	I915_WRITE(enable_reg, val);
3142 
3143 	/*
3144 	 * The spec says we need to "wait" but it also says it should be
3145 	 * immediate.
3146 	 */
3147 	if (intel_wait_for_register(&dev_priv->uncore,
3148 				    enable_reg, PLL_POWER_STATE, 0, 1))
3149 		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3150 }
3151 
3152 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3153 			      struct intel_shared_dpll *pll)
3154 {
3155 	icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
3156 }
3157 
3158 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3159 			    struct intel_shared_dpll *pll)
3160 {
3161 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3162 }
3163 
3164 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3165 			   struct intel_shared_dpll *pll)
3166 {
3167 	i915_reg_t enable_reg =
3168 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3169 
3170 	icl_pll_disable(dev_priv, pll, enable_reg);
3171 }
3172 
3173 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3174 			      const struct intel_dpll_hw_state *hw_state)
3175 {
3176 	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3177 		      "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
3178 		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3179 		      "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
3180 		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3181 		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3182 		      hw_state->cfgcr0, hw_state->cfgcr1,
3183 		      hw_state->mg_refclkin_ctl,
3184 		      hw_state->mg_clktop2_coreclkctl1,
3185 		      hw_state->mg_clktop2_hsclkctl,
3186 		      hw_state->mg_pll_div0,
3187 		      hw_state->mg_pll_div1,
3188 		      hw_state->mg_pll_lf,
3189 		      hw_state->mg_pll_frac_lock,
3190 		      hw_state->mg_pll_ssc,
3191 		      hw_state->mg_pll_bias,
3192 		      hw_state->mg_pll_tdc_coldst_bias);
3193 }
3194 
3195 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3196 	.enable = combo_pll_enable,
3197 	.disable = combo_pll_disable,
3198 	.get_hw_state = combo_pll_get_hw_state,
3199 };
3200 
3201 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3202 	.enable = tbt_pll_enable,
3203 	.disable = tbt_pll_disable,
3204 	.get_hw_state = tbt_pll_get_hw_state,
3205 };
3206 
3207 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3208 	.enable = mg_pll_enable,
3209 	.disable = mg_pll_disable,
3210 	.get_hw_state = mg_pll_get_hw_state,
3211 };
3212 
3213 static const struct dpll_info icl_plls[] = {
3214 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3215 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3216 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3217 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3218 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3219 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3220 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3221 	{ },
3222 };
3223 
3224 static const struct intel_dpll_mgr icl_pll_mgr = {
3225 	.dpll_info = icl_plls,
3226 	.get_dpll = icl_get_dpll,
3227 	.dump_hw_state = icl_dump_hw_state,
3228 };
3229 
3230 static const struct dpll_info ehl_plls[] = {
3231 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3232 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3233 	{ },
3234 };
3235 
3236 static const struct intel_dpll_mgr ehl_pll_mgr = {
3237 	.dpll_info = ehl_plls,
3238 	.get_dpll = icl_get_dpll,
3239 	.dump_hw_state = icl_dump_hw_state,
3240 };
3241 
3242 /**
3243  * intel_shared_dpll_init - Initialize shared DPLLs
3244  * @dev: drm device
3245  *
3246  * Initialize shared DPLLs for @dev.
3247  */
3248 void intel_shared_dpll_init(struct drm_device *dev)
3249 {
3250 	struct drm_i915_private *dev_priv = to_i915(dev);
3251 	const struct intel_dpll_mgr *dpll_mgr = NULL;
3252 	const struct dpll_info *dpll_info;
3253 	int i;
3254 
3255 	if (IS_ELKHARTLAKE(dev_priv))
3256 		dpll_mgr = &ehl_pll_mgr;
3257 	else if (INTEL_GEN(dev_priv) >= 11)
3258 		dpll_mgr = &icl_pll_mgr;
3259 	else if (IS_CANNONLAKE(dev_priv))
3260 		dpll_mgr = &cnl_pll_mgr;
3261 	else if (IS_GEN9_BC(dev_priv))
3262 		dpll_mgr = &skl_pll_mgr;
3263 	else if (IS_GEN9_LP(dev_priv))
3264 		dpll_mgr = &bxt_pll_mgr;
3265 	else if (HAS_DDI(dev_priv))
3266 		dpll_mgr = &hsw_pll_mgr;
3267 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3268 		dpll_mgr = &pch_pll_mgr;
3269 
3270 	if (!dpll_mgr) {
3271 		dev_priv->num_shared_dpll = 0;
3272 		return;
3273 	}
3274 
3275 	dpll_info = dpll_mgr->dpll_info;
3276 
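	/*
	 * dev_priv->shared_dplls[] is indexed by DPLL id, so each table
	 * entry is expected to sit at the index matching its id.
	 */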
3277 	for (i = 0; dpll_info[i].name; i++) {
3278 		WARN_ON(i != dpll_info[i].id);
3279 		dev_priv->shared_dplls[i].info = &dpll_info[i];
3280 	}
3281 
3282 	dev_priv->dpll_mgr = dpll_mgr;
3283 	dev_priv->num_shared_dpll = i;
3284 	mutex_init(&dev_priv->dpll_lock);
3285 
3286 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3287 }
3288 
3289 /**
3290  * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
3291  * @crtc_state: atomic state for the crtc
3292  * @encoder: encoder
3293  *
3294  * Find an appropriate DPLL for the given CRTC and encoder combination. A
3295  * reference from the @crtc_state to the returned pll is registered in the
3296  * atomic state. That configuration is made effective by calling
3297  * intel_shared_dpll_swap_state(). The reference should be released by calling
3298  * intel_release_shared_dpll().
3299  *
3300  * Returns:
3301  * A shared DPLL to be used by @crtc_state and @encoder.
3302  */
3303 struct intel_shared_dpll *
3304 intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
3305 		      struct intel_encoder *encoder)
3306 {
3307 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3308 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3309 
3310 	if (WARN_ON(!dpll_mgr))
3311 		return NULL;
3312 
3313 	return dpll_mgr->get_dpll(crtc_state, encoder);
3314 }
3315 
3316 /**
3317  * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
3318  * @dpll: dpll in use by @crtc
3319  * @crtc: crtc
3320  * @state: atomic state
3321  *
3322  * This function releases the reference from @crtc to @dpll from the
3323  * atomic @state. The new configuration is made effective by calling
3324  * intel_shared_dpll_swap_state().
3325  */
3326 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
3327 			       struct intel_crtc *crtc,
3328 			       struct drm_atomic_state *state)
3329 {
3330 	struct intel_shared_dpll_state *shared_dpll_state;
3331 
3332 	shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
3333 	shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
3334 }
3335 
3336 /**
3337  * intel_dpll_dump_hw_state - write hw_state to dmesg
3338  * @dev_priv: i915 drm device
3339  * @hw_state: hw state to be written to the log
3340  *
3341  * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3342  */
3343 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3344 			      const struct intel_dpll_hw_state *hw_state)
3345 {
3346 	if (dev_priv->dpll_mgr) {
3347 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3348 	} else {
3349 		/* fallback for platforms that don't use the shared dpll
3350 		 * infrastructure
3351 		 */
3352 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3353 			      "fp0: 0x%x, fp1: 0x%x\n",
3354 			      hw_state->dpll,
3355 			      hw_state->dpll_md,
3356 			      hw_state->fp0,
3357 			      hw_state->fp1);
3358 	}
3359 }
3360