1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
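
/*
 * Rough usage sketch (illustrative only; the real call sites live in the
 * atomic check/commit paths elsewhere in the driver, with the signatures
 * declared in intel_dpll_mgr.h):
 *
 *	During atomic check, reserve the PLL(s) needed by a CRTC/encoder:
 *		if (!intel_reserve_shared_dplls(state, crtc, encoder))
 *			return -EINVAL;
 *	or drop the PLL(s) of a CRTC that is being disabled:
 *		intel_release_shared_dplls(state, crtc);
 *
 *	During atomic commit, make the staged PLL user tracking effective:
 *		intel_shared_dpll_swap_state(state);
 */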
47 
48 static void
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 				  struct intel_shared_dpll_state *shared_dpll)
51 {
52 	enum intel_dpll_id i;
53 
54 	/* Copy shared dpll state */
55 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
57 
58 		shared_dpll[i] = pll->state;
59 	}
60 }
61 
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
64 {
65 	struct intel_atomic_state *state = to_intel_atomic_state(s);
66 
67 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
68 
69 	if (!state->dpll_set) {
70 		state->dpll_set = true;
71 
72 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
73 						  state->shared_dpll);
74 	}
75 
76 	return state->shared_dpll;
77 }
78 
79 /**
80  * intel_get_shared_dpll_by_id - get a DPLL given its id
81  * @dev_priv: i915 device instance
82  * @id: pll id
83  *
84  * Returns:
85  * A pointer to the DPLL with @id
86  */
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 			    enum intel_dpll_id id)
90 {
91 	return &dev_priv->shared_dplls[id];
92 }
93 
94 /**
95  * intel_get_shared_dpll_id - get the id of a DPLL
96  * @dev_priv: i915 device instance
97  * @pll: the DPLL
98  *
99  * Returns:
100  * The id of @pll
101  */
102 enum intel_dpll_id
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 			 struct intel_shared_dpll *pll)
105 {
	if (WARN_ON(pll < dev_priv->shared_dplls ||
107 		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
108 		return -1;
109 
110 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
111 }
112 
113 /* For ILK+ */
114 void assert_shared_dpll(struct drm_i915_private *dev_priv,
115 			struct intel_shared_dpll *pll,
116 			bool state)
117 {
118 	bool cur_state;
119 	struct intel_dpll_hw_state hw_state;
120 
121 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
122 		return;
123 
124 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
125 	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
127 			pll->info->name, onoff(state), onoff(cur_state));
128 }
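
/*
 * Note: assert_shared_dpll_enabled()/assert_shared_dpll_disabled() used
 * below are assumed to be thin wrappers around assert_shared_dpll() with
 * @state fixed to true/false respectively (see the header that declares
 * assert_shared_dpll()).
 */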
129 
130 /**
 * intel_prepare_shared_dpll - call a DPLL's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared DPLL
133  *
134  * This calls the PLL's prepare hook if it has one and if the PLL is not
135  * already enabled. The prepare hook is platform specific.
136  */
137 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
138 {
139 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
140 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
141 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
142 
143 	if (WARN_ON(pll == NULL))
144 		return;
145 
146 	mutex_lock(&dev_priv->dpll_lock);
147 	WARN_ON(!pll->state.crtc_mask);
148 	if (!pll->active_mask) {
149 		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
150 		WARN_ON(pll->on);
151 		assert_shared_dpll_disabled(dev_priv, pll);
152 
153 		pll->info->funcs->prepare(dev_priv, pll);
154 	}
155 	mutex_unlock(&dev_priv->dpll_lock);
156 }
157 
158 /**
159  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
160  * @crtc_state: CRTC, and its state, which has a shared DPLL
161  *
 * Enable the shared DPLL used by the CRTC in @crtc_state.
163  */
164 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
165 {
166 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
167 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
168 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
169 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
170 	unsigned int old_mask;
171 
172 	if (WARN_ON(pll == NULL))
173 		return;
174 
175 	mutex_lock(&dev_priv->dpll_lock);
176 	old_mask = pll->active_mask;
177 
178 	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
179 	    WARN_ON(pll->active_mask & crtc_mask))
180 		goto out;
181 
182 	pll->active_mask |= crtc_mask;
183 
184 	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
185 		      pll->info->name, pll->active_mask, pll->on,
186 		      crtc->base.base.id);
187 
188 	if (old_mask) {
189 		WARN_ON(!pll->on);
190 		assert_shared_dpll_enabled(dev_priv, pll);
191 		goto out;
192 	}
193 	WARN_ON(pll->on);
194 
195 	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
196 	pll->info->funcs->enable(dev_priv, pll);
197 	pll->on = true;
198 
199 out:
200 	mutex_unlock(&dev_priv->dpll_lock);
201 }
202 
203 /**
204  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
205  * @crtc_state: CRTC, and its state, which has a shared DPLL
206  *
 * Disable the shared DPLL used by the CRTC in @crtc_state.
208  */
209 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
210 {
211 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
212 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
213 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
214 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
215 
216 	/* PCH only available on ILK+ */
217 	if (INTEL_GEN(dev_priv) < 5)
218 		return;
219 
220 	if (pll == NULL)
221 		return;
222 
223 	mutex_lock(&dev_priv->dpll_lock);
224 	if (WARN_ON(!(pll->active_mask & crtc_mask)))
225 		goto out;
226 
227 	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
228 		      pll->info->name, pll->active_mask, pll->on,
229 		      crtc->base.base.id);
230 
231 	assert_shared_dpll_enabled(dev_priv, pll);
232 	WARN_ON(!pll->on);
233 
234 	pll->active_mask &= ~crtc_mask;
235 	if (pll->active_mask)
236 		goto out;
237 
238 	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
239 	pll->info->funcs->disable(dev_priv, pll);
240 	pll->on = false;
241 
242 out:
243 	mutex_unlock(&dev_priv->dpll_lock);
244 }
245 
246 static struct intel_shared_dpll *
247 intel_find_shared_dpll(struct intel_atomic_state *state,
248 		       const struct intel_crtc *crtc,
249 		       const struct intel_dpll_hw_state *pll_state,
250 		       unsigned long dpll_mask)
251 {
252 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
253 	struct intel_shared_dpll *pll, *unused_pll = NULL;
254 	struct intel_shared_dpll_state *shared_dpll;
255 	enum intel_dpll_id i;
256 
257 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
258 
259 	WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
260 
261 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
262 		pll = &dev_priv->shared_dplls[i];
263 
		/* Only consider PLLs already in use; note the first unused one */
265 		if (shared_dpll[i].crtc_mask == 0) {
266 			if (!unused_pll)
267 				unused_pll = pll;
268 			continue;
269 		}
270 
271 		if (memcmp(pll_state,
272 			   &shared_dpll[i].hw_state,
273 			   sizeof(*pll_state)) == 0) {
274 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
275 				      crtc->base.base.id, crtc->base.name,
276 				      pll->info->name,
277 				      shared_dpll[i].crtc_mask,
278 				      pll->active_mask);
279 			return pll;
280 		}
281 	}
282 
283 	/* Ok no matching timings, maybe there's a free one? */
284 	if (unused_pll) {
285 		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
286 			      crtc->base.base.id, crtc->base.name,
287 			      unused_pll->info->name);
288 		return unused_pll;
289 	}
290 
291 	return NULL;
292 }
293 
294 static void
295 intel_reference_shared_dpll(struct intel_atomic_state *state,
296 			    const struct intel_crtc *crtc,
297 			    const struct intel_shared_dpll *pll,
298 			    const struct intel_dpll_hw_state *pll_state)
299 {
300 	struct intel_shared_dpll_state *shared_dpll;
301 	const enum intel_dpll_id id = pll->info->id;
302 
303 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
304 
305 	if (shared_dpll[id].crtc_mask == 0)
306 		shared_dpll[id].hw_state = *pll_state;
307 
308 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
309 			 pipe_name(crtc->pipe));
310 
311 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
312 }
313 
314 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
315 					  const struct intel_crtc *crtc,
316 					  const struct intel_shared_dpll *pll)
317 {
318 	struct intel_shared_dpll_state *shared_dpll;
319 
320 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
321 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
322 }
323 
324 static void intel_put_dpll(struct intel_atomic_state *state,
325 			   struct intel_crtc *crtc)
326 {
327 	const struct intel_crtc_state *old_crtc_state =
328 		intel_atomic_get_old_crtc_state(state, crtc);
329 	struct intel_crtc_state *new_crtc_state =
330 		intel_atomic_get_new_crtc_state(state, crtc);
331 
332 	new_crtc_state->shared_dpll = NULL;
333 
334 	if (!old_crtc_state->shared_dpll)
335 		return;
336 
337 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
338 }
339 
340 /**
341  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
342  * @state: atomic state
343  *
 * This is the DPLL version of drm_atomic_helper_swap_state() since the
345  * helper does not handle driver-specific global state.
346  *
347  * For consistency with atomic helpers this function does a complete swap,
348  * i.e. it also puts the current state into @state, even though there is no
349  * need for that at this moment.
350  */
351 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
352 {
353 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
354 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
355 	enum intel_dpll_id i;
356 
357 	if (!state->dpll_set)
358 		return;
359 
360 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
361 		struct intel_shared_dpll *pll =
362 			&dev_priv->shared_dplls[i];
363 
364 		swap(pll->state, shared_dpll[i]);
365 	}
366 }
367 
368 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
369 				      struct intel_shared_dpll *pll,
370 				      struct intel_dpll_hw_state *hw_state)
371 {
372 	const enum intel_dpll_id id = pll->info->id;
373 	intel_wakeref_t wakeref;
374 	u32 val;
375 
376 	wakeref = intel_display_power_get_if_enabled(dev_priv,
377 						     POWER_DOMAIN_DISPLAY_CORE);
378 	if (!wakeref)
379 		return false;
380 
381 	val = I915_READ(PCH_DPLL(id));
382 	hw_state->dpll = val;
383 	hw_state->fp0 = I915_READ(PCH_FP0(id));
384 	hw_state->fp1 = I915_READ(PCH_FP1(id));
385 
386 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
387 
388 	return val & DPLL_VCO_ENABLE;
389 }
390 
391 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
392 				 struct intel_shared_dpll *pll)
393 {
394 	const enum intel_dpll_id id = pll->info->id;
395 
396 	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
397 	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
398 }
399 
400 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
401 {
402 	u32 val;
403 	bool enabled;
404 
405 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
406 
407 	val = I915_READ(PCH_DREF_CONTROL);
408 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
409 			    DREF_SUPERSPREAD_SOURCE_MASK));
410 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
411 }
412 
413 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
414 				struct intel_shared_dpll *pll)
415 {
416 	const enum intel_dpll_id id = pll->info->id;
417 
418 	/* PCH refclock must be enabled first */
419 	ibx_assert_pch_refclk_enabled(dev_priv);
420 
421 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
422 
423 	/* Wait for the clocks to stabilize. */
424 	POSTING_READ(PCH_DPLL(id));
425 	udelay(150);
426 
	/*
	 * The pixel multiplier can only be updated once the
428 	 * DPLL is enabled and the clocks are stable.
429 	 *
430 	 * So write it again.
431 	 */
432 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
433 	POSTING_READ(PCH_DPLL(id));
434 	udelay(200);
435 }
436 
437 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
438 				 struct intel_shared_dpll *pll)
439 {
440 	const enum intel_dpll_id id = pll->info->id;
441 
442 	I915_WRITE(PCH_DPLL(id), 0);
443 	POSTING_READ(PCH_DPLL(id));
444 	udelay(200);
445 }
446 
447 static bool ibx_get_dpll(struct intel_atomic_state *state,
448 			 struct intel_crtc *crtc,
449 			 struct intel_encoder *encoder)
450 {
451 	struct intel_crtc_state *crtc_state =
452 		intel_atomic_get_new_crtc_state(state, crtc);
453 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
454 	struct intel_shared_dpll *pll;
455 	enum intel_dpll_id i;
456 
457 	if (HAS_PCH_IBX(dev_priv)) {
458 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
459 		i = (enum intel_dpll_id) crtc->pipe;
460 		pll = &dev_priv->shared_dplls[i];
461 
462 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
463 			      crtc->base.base.id, crtc->base.name,
464 			      pll->info->name);
465 	} else {
466 		pll = intel_find_shared_dpll(state, crtc,
467 					     &crtc_state->dpll_hw_state,
468 					     BIT(DPLL_ID_PCH_PLL_B) |
469 					     BIT(DPLL_ID_PCH_PLL_A));
470 	}
471 
472 	if (!pll)
473 		return false;
474 
475 	/* reference the pll */
476 	intel_reference_shared_dpll(state, crtc,
477 				    pll, &crtc_state->dpll_hw_state);
478 
479 	crtc_state->shared_dpll = pll;
480 
481 	return true;
482 }
483 
484 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
485 			      const struct intel_dpll_hw_state *hw_state)
486 {
487 	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
488 		      "fp0: 0x%x, fp1: 0x%x\n",
489 		      hw_state->dpll,
490 		      hw_state->dpll_md,
491 		      hw_state->fp0,
492 		      hw_state->fp1);
493 }
494 
495 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
496 	.prepare = ibx_pch_dpll_prepare,
497 	.enable = ibx_pch_dpll_enable,
498 	.disable = ibx_pch_dpll_disable,
499 	.get_hw_state = ibx_pch_dpll_get_hw_state,
500 };
501 
502 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
503 			       struct intel_shared_dpll *pll)
504 {
505 	const enum intel_dpll_id id = pll->info->id;
506 
507 	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
508 	POSTING_READ(WRPLL_CTL(id));
509 	udelay(20);
510 }
511 
512 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
513 				struct intel_shared_dpll *pll)
514 {
515 	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
516 	POSTING_READ(SPLL_CTL);
517 	udelay(20);
518 }
519 
520 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
521 				  struct intel_shared_dpll *pll)
522 {
523 	const enum intel_dpll_id id = pll->info->id;
524 	u32 val;
525 
526 	val = I915_READ(WRPLL_CTL(id));
527 	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
528 	POSTING_READ(WRPLL_CTL(id));
529 
530 	/*
531 	 * Try to set up the PCH reference clock once all DPLLs
532 	 * that depend on it have been shut down.
533 	 */
534 	if (dev_priv->pch_ssc_use & BIT(id))
535 		intel_init_pch_refclk(dev_priv);
536 }
537 
538 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
539 				 struct intel_shared_dpll *pll)
540 {
541 	enum intel_dpll_id id = pll->info->id;
542 	u32 val;
543 
544 	val = I915_READ(SPLL_CTL);
545 	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
546 	POSTING_READ(SPLL_CTL);
547 
548 	/*
549 	 * Try to set up the PCH reference clock once all DPLLs
550 	 * that depend on it have been shut down.
551 	 */
552 	if (dev_priv->pch_ssc_use & BIT(id))
553 		intel_init_pch_refclk(dev_priv);
554 }
555 
556 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
557 				       struct intel_shared_dpll *pll,
558 				       struct intel_dpll_hw_state *hw_state)
559 {
560 	const enum intel_dpll_id id = pll->info->id;
561 	intel_wakeref_t wakeref;
562 	u32 val;
563 
564 	wakeref = intel_display_power_get_if_enabled(dev_priv,
565 						     POWER_DOMAIN_DISPLAY_CORE);
566 	if (!wakeref)
567 		return false;
568 
569 	val = I915_READ(WRPLL_CTL(id));
570 	hw_state->wrpll = val;
571 
572 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
573 
574 	return val & WRPLL_PLL_ENABLE;
575 }
576 
577 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
578 				      struct intel_shared_dpll *pll,
579 				      struct intel_dpll_hw_state *hw_state)
580 {
581 	intel_wakeref_t wakeref;
582 	u32 val;
583 
584 	wakeref = intel_display_power_get_if_enabled(dev_priv,
585 						     POWER_DOMAIN_DISPLAY_CORE);
586 	if (!wakeref)
587 		return false;
588 
589 	val = I915_READ(SPLL_CTL);
590 	hw_state->spll = val;
591 
592 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
593 
594 	return val & SPLL_PLL_ENABLE;
595 }
596 
597 #define LC_FREQ 2700
598 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
599 
600 #define P_MIN 2
601 #define P_MAX 64
602 #define P_INC 2
603 
/* Constraints for good PLL behavior */
605 #define REF_MIN 48
606 #define REF_MAX 400
607 #define VCO_MIN 2400
608 #define VCO_MAX 4800
609 
610 struct hsw_wrpll_rnp {
611 	unsigned p, n2, r2;
612 };
613 
614 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
615 {
616 	unsigned budget;
617 
618 	switch (clock) {
619 	case 25175000:
620 	case 25200000:
621 	case 27000000:
622 	case 27027000:
623 	case 37762500:
624 	case 37800000:
625 	case 40500000:
626 	case 40541000:
627 	case 54000000:
628 	case 54054000:
629 	case 59341000:
630 	case 59400000:
631 	case 72000000:
632 	case 74176000:
633 	case 74250000:
634 	case 81000000:
635 	case 81081000:
636 	case 89012000:
637 	case 89100000:
638 	case 108000000:
639 	case 108108000:
640 	case 111264000:
641 	case 111375000:
642 	case 148352000:
643 	case 148500000:
644 	case 162000000:
645 	case 162162000:
646 	case 222525000:
647 	case 222750000:
648 	case 296703000:
649 	case 297000000:
650 		budget = 0;
651 		break;
652 	case 233500000:
653 	case 245250000:
654 	case 247750000:
655 	case 253250000:
656 	case 298000000:
657 		budget = 1500;
658 		break;
659 	case 169128000:
660 	case 169500000:
661 	case 179500000:
662 	case 202000000:
663 		budget = 2000;
664 		break;
665 	case 256250000:
666 	case 262500000:
667 	case 270000000:
668 	case 272500000:
669 	case 273750000:
670 	case 280750000:
671 	case 281250000:
672 	case 286000000:
673 	case 291750000:
674 		budget = 4000;
675 		break;
676 	case 267250000:
677 	case 268500000:
678 		budget = 5000;
679 		break;
680 	default:
681 		budget = 1000;
682 		break;
683 	}
684 
685 	return budget;
686 }
687 
688 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
689 				 unsigned int r2, unsigned int n2,
690 				 unsigned int p,
691 				 struct hsw_wrpll_rnp *best)
692 {
693 	u64 a, b, c, d, diff, diff_best;
694 
695 	/* No best (r,n,p) yet */
696 	if (best->p == 0) {
697 		best->p = p;
698 		best->n2 = n2;
699 		best->r2 = r2;
700 		return;
701 	}
702 
703 	/*
704 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
705 	 * freq2k.
706 	 *
707 	 * delta = 1e6 *
708 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
709 	 *	   freq2k;
710 	 *
711 	 * and we would like delta <= budget.
712 	 *
713 	 * If the discrepancy is above the PPM-based budget, always prefer to
714 	 * improve upon the previous solution.  However, if you're within the
715 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
716 	 */
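	/*
	 * Numerically, "within budget" means delta <= budget, i.e.
	 * 1000000 * diff <= budget * freq2k * p * r2, which is exactly the
	 * a >= c (and b >= d) test below, written without any division.
	 * Clocks with a 0 ppm budget (e.g. 148.5 MHz) treat every non-exact
	 * candidate as over budget, so the search then simply minimizes the
	 * relative error.
	 */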
717 	a = freq2k * budget * p * r2;
718 	b = freq2k * budget * best->p * best->r2;
719 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
720 	diff_best = abs_diff(freq2k * best->p * best->r2,
721 			     LC_FREQ_2K * best->n2);
722 	c = 1000000 * diff;
723 	d = 1000000 * diff_best;
724 
725 	if (a < c && b < d) {
726 		/* If both are above the budget, pick the closer */
727 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
728 			best->p = p;
729 			best->n2 = n2;
730 			best->r2 = r2;
731 		}
732 	} else if (a >= c && b < d) {
		/* New candidate is within budget, current best is not: take it */
734 		best->p = p;
735 		best->n2 = n2;
736 		best->r2 = r2;
737 	} else if (a >= c && b >= d) {
738 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
739 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
740 			best->p = p;
741 			best->n2 = n2;
742 			best->r2 = r2;
743 		}
744 	}
745 	/* Otherwise a < c && b >= d, do nothing */
746 }
747 
748 static void
749 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
750 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
751 {
752 	u64 freq2k;
753 	unsigned p, n2, r2;
754 	struct hsw_wrpll_rnp best = { 0, 0, 0 };
755 	unsigned budget;
756 
757 	freq2k = clock / 100;
758 
759 	budget = hsw_wrpll_get_budget_for_freq(clock);
760 
	/*
	 * Special case handling for 540 MHz pixel clock: bypass the WR PLL
	 * entirely and pass the LC PLL clock straight through instead.
	 */
763 	if (freq2k == 5400000) {
764 		*n2_out = 2;
765 		*p_out = 1;
766 		*r2_out = 2;
767 		return;
768 	}
769 
770 	/*
771 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
772 	 * the WR PLL.
773 	 *
774 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
775 	 * Injecting R2 = 2 * R gives:
776 	 *   REF_MAX * r2 > LC_FREQ * 2 and
777 	 *   REF_MIN * r2 < LC_FREQ * 2
778 	 *
779 	 * Which means the desired boundaries for r2 are:
780 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
781 	 *
782 	 */
783 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
784 	     r2 <= LC_FREQ * 2 / REF_MIN;
785 	     r2++) {
786 
787 		/*
788 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
789 		 *
790 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
791 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
792 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
793 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
794 		 *
795 		 * Which means the desired boundaries for n2 are:
796 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
797 		 */
798 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
799 		     n2 <= VCO_MAX * r2 / LC_FREQ;
800 		     n2++) {
801 
802 			for (p = P_MIN; p <= P_MAX; p += P_INC)
803 				hsw_wrpll_update_rnp(freq2k, budget,
804 						     r2, n2, p, &best);
805 		}
806 	}
807 
808 	*n2_out = best.n2;
809 	*p_out = best.p;
810 	*r2_out = best.r2;
811 }
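
/*
 * Worked example of the divider ranges above: with LC_FREQ = 2700,
 * REF_MIN = 48 and REF_MAX = 400 the outer loop sweeps r2 from
 * 2 * 2700 / 400 + 1 = 14 to 2 * 2700 / 48 = 112.  For r2 = 14
 * (Ref = 2700 / 7 ~ 385.7 MHz) the inner loop sweeps n2 from
 * 2400 * 14 / 2700 + 1 = 13 to 4800 * 14 / 2700 = 24, keeping the VCO
 * inside [2400, 4800] MHz, and p is then swept over 2..64 in steps of 2.
 */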
812 
813 static struct intel_shared_dpll *
814 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
815 		      struct intel_crtc *crtc)
816 {
817 	struct intel_crtc_state *crtc_state =
818 		intel_atomic_get_new_crtc_state(state, crtc);
819 	struct intel_shared_dpll *pll;
820 	u32 val;
821 	unsigned int p, n2, r2;
822 
823 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
824 
825 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
826 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
827 	      WRPLL_DIVIDER_POST(p);
828 
829 	crtc_state->dpll_hw_state.wrpll = val;
830 
831 	pll = intel_find_shared_dpll(state, crtc,
832 				     &crtc_state->dpll_hw_state,
833 				     BIT(DPLL_ID_WRPLL2) |
834 				     BIT(DPLL_ID_WRPLL1));
835 
836 	if (!pll)
837 		return NULL;
838 
839 	return pll;
840 }
841 
842 static struct intel_shared_dpll *
843 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
844 {
845 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
846 	struct intel_shared_dpll *pll;
847 	enum intel_dpll_id pll_id;
848 	int clock = crtc_state->port_clock;
849 
850 	switch (clock / 2) {
851 	case 81000:
852 		pll_id = DPLL_ID_LCPLL_810;
853 		break;
854 	case 135000:
855 		pll_id = DPLL_ID_LCPLL_1350;
856 		break;
857 	case 270000:
858 		pll_id = DPLL_ID_LCPLL_2700;
859 		break;
860 	default:
861 		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
862 		return NULL;
863 	}
864 
865 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
866 
867 	if (!pll)
868 		return NULL;
869 
870 	return pll;
871 }
872 
873 static bool hsw_get_dpll(struct intel_atomic_state *state,
874 			 struct intel_crtc *crtc,
875 			 struct intel_encoder *encoder)
876 {
877 	struct intel_crtc_state *crtc_state =
878 		intel_atomic_get_new_crtc_state(state, crtc);
879 	struct intel_shared_dpll *pll;
880 
881 	memset(&crtc_state->dpll_hw_state, 0,
882 	       sizeof(crtc_state->dpll_hw_state));
883 
884 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
885 		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
886 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
887 		pll = hsw_ddi_dp_get_dpll(crtc_state);
888 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
889 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
890 			return false;
891 
892 		crtc_state->dpll_hw_state.spll =
893 			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
894 
895 		pll = intel_find_shared_dpll(state, crtc,
896 					     &crtc_state->dpll_hw_state,
897 					     BIT(DPLL_ID_SPLL));
898 	} else {
899 		return false;
900 	}
901 
902 	if (!pll)
903 		return false;
904 
905 	intel_reference_shared_dpll(state, crtc,
906 				    pll, &crtc_state->dpll_hw_state);
907 
908 	crtc_state->shared_dpll = pll;
909 
910 	return true;
911 }
912 
913 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
914 			      const struct intel_dpll_hw_state *hw_state)
915 {
916 	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
917 		      hw_state->wrpll, hw_state->spll);
918 }
919 
920 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
921 	.enable = hsw_ddi_wrpll_enable,
922 	.disable = hsw_ddi_wrpll_disable,
923 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
924 };
925 
926 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
927 	.enable = hsw_ddi_spll_enable,
928 	.disable = hsw_ddi_spll_disable,
929 	.get_hw_state = hsw_ddi_spll_get_hw_state,
930 };
931 
932 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
933 				 struct intel_shared_dpll *pll)
934 {
935 }
936 
937 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
938 				  struct intel_shared_dpll *pll)
939 {
940 }
941 
942 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
943 				       struct intel_shared_dpll *pll,
944 				       struct intel_dpll_hw_state *hw_state)
945 {
946 	return true;
947 }
948 
949 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
950 	.enable = hsw_ddi_lcpll_enable,
951 	.disable = hsw_ddi_lcpll_disable,
952 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
953 };
954 
955 struct skl_dpll_regs {
956 	i915_reg_t ctl, cfgcr1, cfgcr2;
957 };
958 
959 /* this array is indexed by the *shared* pll id */
960 static const struct skl_dpll_regs skl_dpll_regs[4] = {
961 	{
962 		/* DPLL 0 */
963 		.ctl = LCPLL1_CTL,
964 		/* DPLL 0 doesn't support HDMI mode */
965 	},
966 	{
967 		/* DPLL 1 */
968 		.ctl = LCPLL2_CTL,
969 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
970 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
971 	},
972 	{
973 		/* DPLL 2 */
974 		.ctl = WRPLL_CTL(0),
975 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
976 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
977 	},
978 	{
979 		/* DPLL 3 */
980 		.ctl = WRPLL_CTL(1),
981 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
982 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
983 	},
984 };
985 
986 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
987 				    struct intel_shared_dpll *pll)
988 {
989 	const enum intel_dpll_id id = pll->info->id;
990 	u32 val;
991 
992 	val = I915_READ(DPLL_CTRL1);
993 
994 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
995 		 DPLL_CTRL1_SSC(id) |
996 		 DPLL_CTRL1_LINK_RATE_MASK(id));
997 	val |= pll->state.hw_state.ctrl1 << (id * 6);
998 
999 	I915_WRITE(DPLL_CTRL1, val);
1000 	POSTING_READ(DPLL_CTRL1);
1001 }
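
/*
 * Each DPLL owns a 6-bit field in DPLL_CTRL1 starting at bit id * 6
 * (override, link rate, SSC and HDMI mode).  hw_state.ctrl1 is therefore
 * stored as if it were for DPLL 0 and shifted into place here, and shifted
 * back out with "(val >> (id * 6)) & 0x3f" in the get_hw_state hooks
 * below.  DPLL 2, for example, uses bits 17:12.
 */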
1002 
1003 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1004 			       struct intel_shared_dpll *pll)
1005 {
1006 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1007 	const enum intel_dpll_id id = pll->info->id;
1008 
1009 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1010 
1011 	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1012 	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1013 	POSTING_READ(regs[id].cfgcr1);
1014 	POSTING_READ(regs[id].cfgcr2);
1015 
1016 	/* the enable bit is always bit 31 */
1017 	I915_WRITE(regs[id].ctl,
1018 		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
1019 
1020 	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1021 		DRM_ERROR("DPLL %d not locked\n", id);
1022 }
1023 
1024 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1025 				 struct intel_shared_dpll *pll)
1026 {
1027 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1028 }
1029 
1030 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1031 				struct intel_shared_dpll *pll)
1032 {
1033 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1034 	const enum intel_dpll_id id = pll->info->id;
1035 
1036 	/* the enable bit is always bit 31 */
1037 	I915_WRITE(regs[id].ctl,
1038 		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1039 	POSTING_READ(regs[id].ctl);
1040 }
1041 
1042 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1043 				  struct intel_shared_dpll *pll)
1044 {
1045 }
1046 
1047 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1048 				     struct intel_shared_dpll *pll,
1049 				     struct intel_dpll_hw_state *hw_state)
1050 {
1051 	u32 val;
1052 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1053 	const enum intel_dpll_id id = pll->info->id;
1054 	intel_wakeref_t wakeref;
1055 	bool ret;
1056 
1057 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1058 						     POWER_DOMAIN_DISPLAY_CORE);
1059 	if (!wakeref)
1060 		return false;
1061 
1062 	ret = false;
1063 
1064 	val = I915_READ(regs[id].ctl);
1065 	if (!(val & LCPLL_PLL_ENABLE))
1066 		goto out;
1067 
1068 	val = I915_READ(DPLL_CTRL1);
1069 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1070 
1071 	/* avoid reading back stale values if HDMI mode is not enabled */
1072 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1073 		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1074 		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1075 	}
1076 	ret = true;
1077 
1078 out:
1079 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1080 
1081 	return ret;
1082 }
1083 
1084 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1085 				       struct intel_shared_dpll *pll,
1086 				       struct intel_dpll_hw_state *hw_state)
1087 {
1088 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1089 	const enum intel_dpll_id id = pll->info->id;
1090 	intel_wakeref_t wakeref;
1091 	u32 val;
1092 	bool ret;
1093 
1094 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1095 						     POWER_DOMAIN_DISPLAY_CORE);
1096 	if (!wakeref)
1097 		return false;
1098 
1099 	ret = false;
1100 
1101 	/* DPLL0 is always enabled since it drives CDCLK */
1102 	val = I915_READ(regs[id].ctl);
1103 	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1104 		goto out;
1105 
1106 	val = I915_READ(DPLL_CTRL1);
1107 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1108 
1109 	ret = true;
1110 
1111 out:
1112 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1113 
1114 	return ret;
1115 }
1116 
1117 struct skl_wrpll_context {
1118 	u64 min_deviation;		/* current minimal deviation */
1119 	u64 central_freq;		/* chosen central freq */
1120 	u64 dco_freq;			/* chosen dco freq */
1121 	unsigned int p;			/* chosen divider */
1122 };
1123 
1124 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1125 {
1126 	memset(ctx, 0, sizeof(*ctx));
1127 
1128 	ctx->min_deviation = U64_MAX;
1129 }
1130 
/*
 * DCO freq must be within +1%/-6% of the DCO central freq
 * (the deviation below is computed in units of 0.01%, hence 100 and 600).
 */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1134 
1135 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1136 				  u64 central_freq,
1137 				  u64 dco_freq,
1138 				  unsigned int divider)
1139 {
1140 	u64 deviation;
1141 
1142 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1143 			      central_freq);
1144 
1145 	/* positive deviation */
1146 	if (dco_freq >= central_freq) {
1147 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1148 		    deviation < ctx->min_deviation) {
1149 			ctx->min_deviation = deviation;
1150 			ctx->central_freq = central_freq;
1151 			ctx->dco_freq = dco_freq;
1152 			ctx->p = divider;
1153 		}
1154 	/* negative deviation */
1155 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1156 		   deviation < ctx->min_deviation) {
1157 		ctx->min_deviation = deviation;
1158 		ctx->central_freq = central_freq;
1159 		ctx->dco_freq = dco_freq;
1160 		ctx->p = divider;
1161 	}
1162 }
1163 
1164 static void skl_wrpll_get_multipliers(unsigned int p,
1165 				      unsigned int *p0 /* out */,
1166 				      unsigned int *p1 /* out */,
1167 				      unsigned int *p2 /* out */)
1168 {
1169 	/* even dividers */
1170 	if (p % 2 == 0) {
1171 		unsigned int half = p / 2;
1172 
1173 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1174 			*p0 = 2;
1175 			*p1 = 1;
1176 			*p2 = half;
1177 		} else if (half % 2 == 0) {
1178 			*p0 = 2;
1179 			*p1 = half / 2;
1180 			*p2 = 2;
1181 		} else if (half % 3 == 0) {
1182 			*p0 = 3;
1183 			*p1 = half / 3;
1184 			*p2 = 2;
1185 		} else if (half % 7 == 0) {
1186 			*p0 = 7;
1187 			*p1 = half / 7;
1188 			*p2 = 2;
1189 		}
	} else if (p == 3 || p == 9) {  /* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
1191 		*p0 = 3;
1192 		*p1 = 1;
1193 		*p2 = p / 3;
1194 	} else if (p == 5 || p == 7) {
1195 		*p0 = p;
1196 		*p1 = 1;
1197 		*p2 = 1;
1198 	} else if (p == 15) {
1199 		*p0 = 3;
1200 		*p1 = 1;
1201 		*p2 = 5;
1202 	} else if (p == 21) {
1203 		*p0 = 7;
1204 		*p1 = 1;
1205 		*p2 = 3;
1206 	} else if (p == 35) {
1207 		*p0 = 7;
1208 		*p1 = 1;
1209 		*p2 = 5;
1210 	}
1211 }
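
/*
 * Example decompositions following the rules above: p = 12 has half = 6,
 * which is even, so p0/p1/p2 = 2/3/2; p = 20 gives 2/5/2; p = 10 has
 * half = 5, one of the special halves, so 2/1/5; the odd divider p = 15
 * maps to 3/1/5.  In every case p0 * p1 * p2 == p.
 */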
1212 
1213 struct skl_wrpll_params {
1214 	u32 dco_fraction;
1215 	u32 dco_integer;
1216 	u32 qdiv_ratio;
1217 	u32 qdiv_mode;
1218 	u32 kdiv;
1219 	u32 pdiv;
1220 	u32 central_freq;
1221 };
1222 
1223 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1224 				      u64 afe_clock,
1225 				      u64 central_freq,
1226 				      u32 p0, u32 p1, u32 p2)
1227 {
1228 	u64 dco_freq;
1229 
1230 	switch (central_freq) {
1231 	case 9600000000ULL:
1232 		params->central_freq = 0;
1233 		break;
1234 	case 9000000000ULL:
1235 		params->central_freq = 1;
1236 		break;
1237 	case 8400000000ULL:
1238 		params->central_freq = 3;
1239 	}
1240 
1241 	switch (p0) {
1242 	case 1:
1243 		params->pdiv = 0;
1244 		break;
1245 	case 2:
1246 		params->pdiv = 1;
1247 		break;
1248 	case 3:
1249 		params->pdiv = 2;
1250 		break;
1251 	case 7:
1252 		params->pdiv = 4;
1253 		break;
1254 	default:
1255 		WARN(1, "Incorrect PDiv\n");
1256 	}
1257 
1258 	switch (p2) {
1259 	case 5:
1260 		params->kdiv = 0;
1261 		break;
1262 	case 2:
1263 		params->kdiv = 1;
1264 		break;
1265 	case 3:
1266 		params->kdiv = 2;
1267 		break;
1268 	case 1:
1269 		params->kdiv = 3;
1270 		break;
1271 	default:
1272 		WARN(1, "Incorrect KDiv\n");
1273 	}
1274 
1275 	params->qdiv_ratio = p1;
1276 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1277 
1278 	dco_freq = p0 * p1 * p2 * afe_clock;
1279 
1280 	/*
1281 	 * Intermediate values are in Hz.
	 * Divide by MHz to match the bspec.
1283 	 */
1284 	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1285 	params->dco_fraction =
1286 		div_u64((div_u64(dco_freq, 24) -
1287 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1288 }
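
/*
 * Worked example (illustrative numbers): with a 148.5 MHz port clock the
 * AFE clock is 742.5 MHz; trying divider p = 12 against the 9 GHz central
 * frequency yields a DCO of 8.91 GHz (-1% deviation, within the -6% limit)
 * and p0/p1/p2 = 2/3/2, so dco_integer = 8910000000 / 24000000 = 371 and
 * dco_fraction = (371250000 - 371 * 1000000) * 0x8000 / 1000000 = 8192.
 */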
1289 
1290 static bool
1291 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1292 			struct skl_wrpll_params *wrpll_params)
1293 {
	u64 afe_clock = (u64)clock * 5; /* AFE clock is 5x the pixel clock */
1295 	u64 dco_central_freq[3] = { 8400000000ULL,
1296 				    9000000000ULL,
1297 				    9600000000ULL };
1298 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1299 					     24, 28, 30, 32, 36, 40, 42, 44,
1300 					     48, 52, 54, 56, 60, 64, 66, 68,
1301 					     70, 72, 76, 78, 80, 84, 88, 90,
1302 					     92, 96, 98 };
1303 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1304 	static const struct {
1305 		const int *list;
1306 		int n_dividers;
1307 	} dividers[] = {
1308 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1309 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1310 	};
1311 	struct skl_wrpll_context ctx;
1312 	unsigned int dco, d, i;
1313 	unsigned int p0, p1, p2;
1314 
1315 	skl_wrpll_context_init(&ctx);
1316 
1317 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1318 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1319 			for (i = 0; i < dividers[d].n_dividers; i++) {
1320 				unsigned int p = dividers[d].list[i];
1321 				u64 dco_freq = p * afe_clock;
1322 
1323 				skl_wrpll_try_divider(&ctx,
1324 						      dco_central_freq[dco],
1325 						      dco_freq,
1326 						      p);
1327 				/*
1328 				 * Skip the remaining dividers if we're sure to
1329 				 * have found the definitive divider, we can't
1330 				 * improve a 0 deviation.
1331 				 */
1332 				if (ctx.min_deviation == 0)
1333 					goto skip_remaining_dividers;
1334 			}
1335 		}
1336 
1337 skip_remaining_dividers:
1338 		/*
1339 		 * If a solution is found with an even divider, prefer
1340 		 * this one.
1341 		 */
1342 		if (d == 0 && ctx.p)
1343 			break;
1344 	}
1345 
1346 	if (!ctx.p) {
1347 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1348 		return false;
1349 	}
1350 
1351 	/*
1352 	 * gcc incorrectly analyses that these can be used without being
1353 	 * initialized. To be fair, it's hard to guess.
1354 	 */
1355 	p0 = p1 = p2 = 0;
1356 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1357 	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1358 				  p0, p1, p2);
1359 
1360 	return true;
1361 }
1362 
1363 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1364 {
1365 	u32 ctrl1, cfgcr1, cfgcr2;
1366 	struct skl_wrpll_params wrpll_params = { 0, };
1367 
1368 	/*
1369 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1370 	 * as the DPLL id in this function.
1371 	 */
1372 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1373 
1374 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1375 
1376 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1377 				     &wrpll_params))
1378 		return false;
1379 
1380 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1381 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1382 		wrpll_params.dco_integer;
1383 
1384 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1385 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1386 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1387 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1388 		wrpll_params.central_freq;
1389 
1390 	memset(&crtc_state->dpll_hw_state, 0,
1391 	       sizeof(crtc_state->dpll_hw_state));
1392 
1393 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1394 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1395 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1396 	return true;
1397 }
1398 
1399 static bool
1400 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1401 {
1402 	u32 ctrl1;
1403 
1404 	/*
1405 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1406 	 * as the DPLL id in this function.
1407 	 */
1408 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1409 	switch (crtc_state->port_clock / 2) {
1410 	case 81000:
1411 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1412 		break;
1413 	case 135000:
1414 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1415 		break;
1416 	case 270000:
1417 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1418 		break;
1419 		/* eDP 1.4 rates */
1420 	case 162000:
1421 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1422 		break;
1423 	case 108000:
1424 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1425 		break;
1426 	case 216000:
1427 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1428 		break;
1429 	}
1430 
1431 	memset(&crtc_state->dpll_hw_state, 0,
1432 	       sizeof(crtc_state->dpll_hw_state));
1433 
1434 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1435 
1436 	return true;
1437 }
1438 
1439 static bool skl_get_dpll(struct intel_atomic_state *state,
1440 			 struct intel_crtc *crtc,
1441 			 struct intel_encoder *encoder)
1442 {
1443 	struct intel_crtc_state *crtc_state =
1444 		intel_atomic_get_new_crtc_state(state, crtc);
1445 	struct intel_shared_dpll *pll;
1446 	bool bret;
1447 
1448 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1449 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1450 		if (!bret) {
1451 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1452 			return false;
1453 		}
1454 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1455 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1456 		if (!bret) {
1457 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1458 			return false;
1459 		}
1460 	} else {
1461 		return false;
1462 	}
1463 
1464 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1465 		pll = intel_find_shared_dpll(state, crtc,
1466 					     &crtc_state->dpll_hw_state,
1467 					     BIT(DPLL_ID_SKL_DPLL0));
1468 	else
1469 		pll = intel_find_shared_dpll(state, crtc,
1470 					     &crtc_state->dpll_hw_state,
1471 					     BIT(DPLL_ID_SKL_DPLL3) |
1472 					     BIT(DPLL_ID_SKL_DPLL2) |
1473 					     BIT(DPLL_ID_SKL_DPLL1));
1474 	if (!pll)
1475 		return false;
1476 
1477 	intel_reference_shared_dpll(state, crtc,
1478 				    pll, &crtc_state->dpll_hw_state);
1479 
1480 	crtc_state->shared_dpll = pll;
1481 
1482 	return true;
1483 }
1484 
1485 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1486 			      const struct intel_dpll_hw_state *hw_state)
1487 {
1488 	DRM_DEBUG_KMS("dpll_hw_state: "
1489 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1490 		      hw_state->ctrl1,
1491 		      hw_state->cfgcr1,
1492 		      hw_state->cfgcr2);
1493 }
1494 
1495 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1496 	.enable = skl_ddi_pll_enable,
1497 	.disable = skl_ddi_pll_disable,
1498 	.get_hw_state = skl_ddi_pll_get_hw_state,
1499 };
1500 
1501 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1502 	.enable = skl_ddi_dpll0_enable,
1503 	.disable = skl_ddi_dpll0_disable,
1504 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1505 };
1506 
1507 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1508 				struct intel_shared_dpll *pll)
1509 {
1510 	u32 temp;
1511 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1512 	enum dpio_phy phy;
1513 	enum dpio_channel ch;
1514 
1515 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1516 
1517 	/* Non-SSC reference */
1518 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1519 	temp |= PORT_PLL_REF_SEL;
1520 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1521 
1522 	if (IS_GEMINILAKE(dev_priv)) {
1523 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1524 		temp |= PORT_PLL_POWER_ENABLE;
1525 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1526 
1527 		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1528 				 PORT_PLL_POWER_STATE), 200))
1529 			DRM_ERROR("Power state not set for PLL:%d\n", port);
1530 	}
1531 
1532 	/* Disable 10 bit clock */
1533 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1534 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1535 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1536 
1537 	/* Write P1 & P2 */
1538 	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1539 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1540 	temp |= pll->state.hw_state.ebb0;
1541 	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1542 
1543 	/* Write M2 integer */
1544 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1545 	temp &= ~PORT_PLL_M2_MASK;
1546 	temp |= pll->state.hw_state.pll0;
1547 	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1548 
1549 	/* Write N */
1550 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1551 	temp &= ~PORT_PLL_N_MASK;
1552 	temp |= pll->state.hw_state.pll1;
1553 	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1554 
1555 	/* Write M2 fraction */
1556 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1557 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1558 	temp |= pll->state.hw_state.pll2;
1559 	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1560 
1561 	/* Write M2 fraction enable */
1562 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1563 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1564 	temp |= pll->state.hw_state.pll3;
1565 	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1566 
1567 	/* Write coeff */
1568 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1569 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1570 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1571 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1572 	temp |= pll->state.hw_state.pll6;
1573 	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1574 
1575 	/* Write calibration val */
1576 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1577 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1578 	temp |= pll->state.hw_state.pll8;
1579 	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1580 
1581 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1582 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1583 	temp |= pll->state.hw_state.pll9;
1584 	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1585 
1586 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1587 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1588 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1589 	temp |= pll->state.hw_state.pll10;
1590 	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1591 
1592 	/* Recalibrate with new settings */
1593 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1594 	temp |= PORT_PLL_RECALIBRATE;
1595 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1596 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1597 	temp |= pll->state.hw_state.ebb4;
1598 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1599 
1600 	/* Enable PLL */
1601 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1602 	temp |= PORT_PLL_ENABLE;
1603 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1604 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1605 
1606 	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1607 			200))
1608 		DRM_ERROR("PLL %d not locked\n", port);
1609 
1610 	if (IS_GEMINILAKE(dev_priv)) {
1611 		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1612 		temp |= DCC_DELAY_RANGE_2;
1613 		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1614 	}
1615 
1616 	/*
	 * While we write to the group register to program all lanes at once,
	 * we can only read back individual lane registers, so pick lanes 0/1
	 * for that.
1619 	 */
1620 	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1621 	temp &= ~LANE_STAGGER_MASK;
1622 	temp &= ~LANESTAGGER_STRAP_OVRD;
1623 	temp |= pll->state.hw_state.pcsdw12;
1624 	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1625 }
1626 
1627 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1628 					struct intel_shared_dpll *pll)
1629 {
1630 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1631 	u32 temp;
1632 
1633 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1634 	temp &= ~PORT_PLL_ENABLE;
1635 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1636 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1637 
1638 	if (IS_GEMINILAKE(dev_priv)) {
1639 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1640 		temp &= ~PORT_PLL_POWER_ENABLE;
1641 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1642 
1643 		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1644 				PORT_PLL_POWER_STATE), 200))
1645 			DRM_ERROR("Power state not reset for PLL:%d\n", port);
1646 	}
1647 }
1648 
1649 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1650 					struct intel_shared_dpll *pll,
1651 					struct intel_dpll_hw_state *hw_state)
1652 {
1653 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1654 	intel_wakeref_t wakeref;
1655 	enum dpio_phy phy;
1656 	enum dpio_channel ch;
1657 	u32 val;
1658 	bool ret;
1659 
1660 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1661 
1662 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1663 						     POWER_DOMAIN_DISPLAY_CORE);
1664 	if (!wakeref)
1665 		return false;
1666 
1667 	ret = false;
1668 
1669 	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1670 	if (!(val & PORT_PLL_ENABLE))
1671 		goto out;
1672 
1673 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1674 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1675 
1676 	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1677 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1678 
1679 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1680 	hw_state->pll0 &= PORT_PLL_M2_MASK;
1681 
1682 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1683 	hw_state->pll1 &= PORT_PLL_N_MASK;
1684 
1685 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1686 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1687 
1688 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1689 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1690 
1691 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1692 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1693 			  PORT_PLL_INT_COEFF_MASK |
1694 			  PORT_PLL_GAIN_CTL_MASK;
1695 
1696 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1697 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1698 
1699 	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1700 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1701 
1702 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1703 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1704 			   PORT_PLL_DCO_AMP_MASK;
1705 
1706 	/*
1707 	 * While we write to the group register to program all lanes at once we
1708 	 * can read only lane registers. We configure all lanes the same way, so
1709 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1710 	 */
1711 	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1712 	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1713 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1714 				 hw_state->pcsdw12,
1715 				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1716 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1717 
1718 	ret = true;
1719 
1720 out:
1721 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1722 
1723 	return ret;
1724 }
1725 
1726 /* bxt clock parameters */
1727 struct bxt_clk_div {
1728 	int clock;
1729 	u32 p1;
1730 	u32 p2;
1731 	u32 m2_int;
1732 	u32 m2_frac;
1733 	bool m2_frac_en;
1734 	u32 n;
1735 
1736 	int vco;
1737 };
1738 
1739 /* pre-calculated values for DP linkrates */
1740 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1741 	{162000, 4, 2, 32, 1677722, 1, 1},
1742 	{270000, 4, 1, 27,       0, 0, 1},
1743 	{540000, 2, 1, 27,       0, 0, 1},
1744 	{216000, 3, 2, 32, 1677722, 1, 1},
1745 	{243000, 4, 1, 24, 1258291, 1, 1},
1746 	{324000, 4, 1, 32, 1677722, 1, 1},
1747 	{432000, 3, 1, 32, 1677722, 1, 1}
1748 };
1749 
1750 static bool
1751 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1752 			  struct bxt_clk_div *clk_div)
1753 {
1754 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1755 	struct dpll best_clock;
1756 
	/*
	 * Calculate the HDMI dividers.
	 *
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
1762 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1763 		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1764 				 crtc_state->port_clock,
1765 				 pipe_name(crtc->pipe));
1766 		return false;
1767 	}
1768 
1769 	clk_div->p1 = best_clock.p1;
1770 	clk_div->p2 = best_clock.p2;
1771 	WARN_ON(best_clock.m1 != 2);
1772 	clk_div->n = best_clock.n;
1773 	clk_div->m2_int = best_clock.m2 >> 22;
1774 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1775 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1776 
1777 	clk_div->vco = best_clock.vco;
1778 
1779 	return true;
1780 }
1781 
1782 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1783 				    struct bxt_clk_div *clk_div)
1784 {
1785 	int clock = crtc_state->port_clock;
1786 	int i;
1787 
1788 	*clk_div = bxt_dp_clk_val[0];
1789 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1790 		if (bxt_dp_clk_val[i].clock == clock) {
1791 			*clk_div = bxt_dp_clk_val[i];
1792 			break;
1793 		}
1794 	}
1795 
1796 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1797 }
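
/*
 * For example, the 270000 (2.7 GHz link) entry above has p1 = 4, p2 = 1,
 * so vco = 270000 * 10 / 2 * 4 * 1 = 5400000 kHz, i.e. the 5.4 GHz VCO
 * that bxt_ddi_set_dpll_hw_state() below handles as its own special case.
 */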
1798 
1799 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1800 				      const struct bxt_clk_div *clk_div)
1801 {
1802 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1803 	int clock = crtc_state->port_clock;
1804 	int vco = clk_div->vco;
1805 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1806 	u32 lanestagger;
1807 
1808 	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
1809 
1810 	if (vco >= 6200000 && vco <= 6700000) {
1811 		prop_coef = 4;
1812 		int_coef = 9;
1813 		gain_ctl = 3;
1814 		targ_cnt = 8;
1815 	} else if ((vco > 5400000 && vco < 6200000) ||
1816 			(vco >= 4800000 && vco < 5400000)) {
1817 		prop_coef = 5;
1818 		int_coef = 11;
1819 		gain_ctl = 3;
1820 		targ_cnt = 9;
1821 	} else if (vco == 5400000) {
1822 		prop_coef = 3;
1823 		int_coef = 8;
1824 		gain_ctl = 1;
1825 		targ_cnt = 9;
1826 	} else {
1827 		DRM_ERROR("Invalid VCO\n");
1828 		return false;
1829 	}
1830 
1831 	if (clock > 270000)
1832 		lanestagger = 0x18;
1833 	else if (clock > 135000)
1834 		lanestagger = 0x0d;
1835 	else if (clock > 67000)
1836 		lanestagger = 0x07;
1837 	else if (clock > 33000)
1838 		lanestagger = 0x04;
1839 	else
1840 		lanestagger = 0x02;
1841 
1842 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1843 	dpll_hw_state->pll0 = clk_div->m2_int;
1844 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1845 	dpll_hw_state->pll2 = clk_div->m2_frac;
1846 
1847 	if (clk_div->m2_frac_en)
1848 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1849 
1850 	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1851 	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1852 
1853 	dpll_hw_state->pll8 = targ_cnt;
1854 
1855 	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1856 
1857 	dpll_hw_state->pll10 =
1858 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1859 		| PORT_PLL_DCO_AMP_OVR_EN_H;
1860 
1861 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1862 
1863 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1864 
1865 	return true;
1866 }
1867 
1868 static bool
1869 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1870 {
1871 	struct bxt_clk_div clk_div = {};
1872 
1873 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1874 
1875 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1876 }
1877 
1878 static bool
1879 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1880 {
1881 	struct bxt_clk_div clk_div = {};
1882 
1883 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1884 
1885 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1886 }
1887 
1888 static bool bxt_get_dpll(struct intel_atomic_state *state,
1889 			 struct intel_crtc *crtc,
1890 			 struct intel_encoder *encoder)
1891 {
1892 	struct intel_crtc_state *crtc_state =
1893 		intel_atomic_get_new_crtc_state(state, crtc);
1894 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1895 	struct intel_shared_dpll *pll;
1896 	enum intel_dpll_id id;
1897 
1898 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1899 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1900 		return false;
1901 
1902 	if (intel_crtc_has_dp_encoder(crtc_state) &&
1903 	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1904 		return false;
1905 
1906 	/* 1:1 mapping between ports and PLLs */
1907 	id = (enum intel_dpll_id) encoder->port;
1908 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
1909 
1910 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1911 		      crtc->base.base.id, crtc->base.name, pll->info->name);
1912 
1913 	intel_reference_shared_dpll(state, crtc,
1914 				    pll, &crtc_state->dpll_hw_state);
1915 
1916 	crtc_state->shared_dpll = pll;
1917 
1918 	return true;
1919 }
1920 
1921 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1922 			      const struct intel_dpll_hw_state *hw_state)
1923 {
	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
1925 		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1926 		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1927 		      hw_state->ebb0,
1928 		      hw_state->ebb4,
1929 		      hw_state->pll0,
1930 		      hw_state->pll1,
1931 		      hw_state->pll2,
1932 		      hw_state->pll3,
1933 		      hw_state->pll6,
1934 		      hw_state->pll8,
1935 		      hw_state->pll9,
1936 		      hw_state->pll10,
1937 		      hw_state->pcsdw12);
1938 }
1939 
1940 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1941 	.enable = bxt_ddi_pll_enable,
1942 	.disable = bxt_ddi_pll_disable,
1943 	.get_hw_state = bxt_ddi_pll_get_hw_state,
1944 };
1945 
1946 struct intel_dpll_mgr {
1947 	const struct dpll_info *dpll_info;
1948 
1949 	bool (*get_dplls)(struct intel_atomic_state *state,
1950 			  struct intel_crtc *crtc,
1951 			  struct intel_encoder *encoder);
1952 	void (*put_dplls)(struct intel_atomic_state *state,
1953 			  struct intel_crtc *crtc);
1954 	void (*update_active_dpll)(struct intel_atomic_state *state,
1955 				   struct intel_crtc *crtc,
1956 				   struct intel_encoder *encoder);
1957 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1958 			      const struct intel_dpll_hw_state *hw_state);
1959 };
1960 
1961 static const struct dpll_info pch_plls[] = {
1962 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1963 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1964 	{ },
1965 };
1966 
1967 static const struct intel_dpll_mgr pch_pll_mgr = {
1968 	.dpll_info = pch_plls,
1969 	.get_dplls = ibx_get_dpll,
1970 	.put_dplls = intel_put_dpll,
1971 	.dump_hw_state = ibx_dump_hw_state,
1972 };
1973 
1974 static const struct dpll_info hsw_plls[] = {
1975 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1976 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1977 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1978 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1979 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1980 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1981 	{ },
1982 };
1983 
1984 static const struct intel_dpll_mgr hsw_pll_mgr = {
1985 	.dpll_info = hsw_plls,
1986 	.get_dplls = hsw_get_dpll,
1987 	.put_dplls = intel_put_dpll,
1988 	.dump_hw_state = hsw_dump_hw_state,
1989 };
1990 
1991 static const struct dpll_info skl_plls[] = {
1992 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1993 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1994 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1995 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1996 	{ },
1997 };
1998 
1999 static const struct intel_dpll_mgr skl_pll_mgr = {
2000 	.dpll_info = skl_plls,
2001 	.get_dplls = skl_get_dpll,
2002 	.put_dplls = intel_put_dpll,
2003 	.dump_hw_state = skl_dump_hw_state,
2004 };
2005 
2006 static const struct dpll_info bxt_plls[] = {
2007 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2008 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2009 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2010 	{ },
2011 };
2012 
2013 static const struct intel_dpll_mgr bxt_pll_mgr = {
2014 	.dpll_info = bxt_plls,
2015 	.get_dplls = bxt_get_dpll,
2016 	.put_dplls = intel_put_dpll,
2017 	.dump_hw_state = bxt_dump_hw_state,
2018 };
2019 
2020 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2021 			       struct intel_shared_dpll *pll)
2022 {
2023 	const enum intel_dpll_id id = pll->info->id;
2024 	u32 val;
2025 
2026 	/* 1. Enable DPLL power in DPLL_ENABLE. */
2027 	val = I915_READ(CNL_DPLL_ENABLE(id));
2028 	val |= PLL_POWER_ENABLE;
2029 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2030 
2031 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2032 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2033 				  PLL_POWER_STATE, 5))
2034 		DRM_ERROR("PLL %d Power not enabled\n", id);
2035 
2036 	/*
2037 	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2038 	 * select DP mode, and set DP link rate.
2039 	 */
2040 	val = pll->state.hw_state.cfgcr0;
2041 	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
2042 
	/* 4. Read back to ensure writes completed */
2044 	POSTING_READ(CNL_DPLL_CFGCR0(id));
2045 
	/* Configure DPLL_CFGCR1; avoid touching it if HDMI mode is not enabled */
2048 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2049 		val = pll->state.hw_state.cfgcr1;
2050 		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
		/* Read back to ensure writes completed */
2052 		POSTING_READ(CNL_DPLL_CFGCR1(id));
2053 	}
2054 
2055 	/*
2056 	 * 5. If the frequency will result in a change to the voltage
2057 	 * requirement, follow the Display Voltage Frequency Switching
2058 	 * Sequence Before Frequency Change
2059 	 *
2060 	 * Note: DVFS is actually handled via the cdclk code paths,
2061 	 * hence we do nothing here.
2062 	 */
2063 
2064 	/* 6. Enable DPLL in DPLL_ENABLE. */
2065 	val = I915_READ(CNL_DPLL_ENABLE(id));
2066 	val |= PLL_ENABLE;
2067 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2068 
2069 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2070 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2071 		DRM_ERROR("PLL %d not locked\n", id);
2072 
2073 	/*
2074 	 * 8. If the frequency will result in a change to the voltage
2075 	 * requirement, follow the Display Voltage Frequency Switching
2076 	 * Sequence After Frequency Change
2077 	 *
2078 	 * Note: DVFS is actually handled via the cdclk code paths,
2079 	 * hence we do nothing here.
2080 	 */
2081 
2082 	/*
2083 	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2084 	 * Done at intel_ddi_clk_select
2085 	 */
2086 }
2087 
2088 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2089 				struct intel_shared_dpll *pll)
2090 {
2091 	const enum intel_dpll_id id = pll->info->id;
2092 	u32 val;
2093 
2094 	/*
2095 	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2096 	 * Done at intel_ddi_post_disable
2097 	 */
2098 
2099 	/*
2100 	 * 2. If the frequency will result in a change to the voltage
2101 	 * requirement, follow the Display Voltage Frequency Switching
2102 	 * Sequence Before Frequency Change
2103 	 *
2104 	 * Note: DVFS is actually handled via the cdclk code paths,
2105 	 * hence we do nothing here.
2106 	 */
2107 
2108 	/* 3. Disable DPLL through DPLL_ENABLE. */
2109 	val = I915_READ(CNL_DPLL_ENABLE(id));
2110 	val &= ~PLL_ENABLE;
2111 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2112 
2113 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2114 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		DRM_ERROR("PLL %d still locked\n", id);
2116 
2117 	/*
2118 	 * 5. If the frequency will result in a change to the voltage
2119 	 * requirement, follow the Display Voltage Frequency Switching
2120 	 * Sequence After Frequency Change
2121 	 *
2122 	 * Note: DVFS is actually handled via the cdclk code paths,
2123 	 * hence we do nothing here.
2124 	 */
2125 
2126 	/* 6. Disable DPLL power in DPLL_ENABLE. */
2127 	val = I915_READ(CNL_DPLL_ENABLE(id));
2128 	val &= ~PLL_POWER_ENABLE;
2129 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2130 
2131 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2132 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2133 				    PLL_POWER_STATE, 5))
2134 		DRM_ERROR("PLL %d Power not disabled\n", id);
2135 }
2136 
2137 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2138 				     struct intel_shared_dpll *pll,
2139 				     struct intel_dpll_hw_state *hw_state)
2140 {
2141 	const enum intel_dpll_id id = pll->info->id;
2142 	intel_wakeref_t wakeref;
2143 	u32 val;
2144 	bool ret;
2145 
2146 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2147 						     POWER_DOMAIN_DISPLAY_CORE);
2148 	if (!wakeref)
2149 		return false;
2150 
2151 	ret = false;
2152 
2153 	val = I915_READ(CNL_DPLL_ENABLE(id));
2154 	if (!(val & PLL_ENABLE))
2155 		goto out;
2156 
2157 	val = I915_READ(CNL_DPLL_CFGCR0(id));
2158 	hw_state->cfgcr0 = val;
2159 
2160 	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE)
		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
2164 	ret = true;
2165 
2166 out:
2167 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2168 
2169 	return ret;
2170 }
2171 
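/*
 * Split the overall divider into the P, Q and K dividers such that
 * pdiv * qdiv * kdiv == bestdiv; e.g. bestdiv = 12 decomposes into
 * pdiv = 2, qdiv = 3, kdiv = 2.
 */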
2172 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2173 				      int *qdiv, int *kdiv)
2174 {
2175 	/* even dividers */
2176 	if (bestdiv % 2 == 0) {
2177 		if (bestdiv == 2) {
2178 			*pdiv = 2;
2179 			*qdiv = 1;
2180 			*kdiv = 1;
2181 		} else if (bestdiv % 4 == 0) {
2182 			*pdiv = 2;
2183 			*qdiv = bestdiv / 4;
2184 			*kdiv = 2;
2185 		} else if (bestdiv % 6 == 0) {
2186 			*pdiv = 3;
2187 			*qdiv = bestdiv / 6;
2188 			*kdiv = 2;
2189 		} else if (bestdiv % 5 == 0) {
2190 			*pdiv = 5;
2191 			*qdiv = bestdiv / 10;
2192 			*kdiv = 2;
2193 		} else if (bestdiv % 14 == 0) {
2194 			*pdiv = 7;
2195 			*qdiv = bestdiv / 14;
2196 			*kdiv = 2;
2197 		}
2198 	} else {
2199 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2200 			*pdiv = bestdiv;
2201 			*qdiv = 1;
2202 			*kdiv = 1;
2203 		} else { /* 9, 15, 21 */
2204 			*pdiv = bestdiv / 3;
2205 			*qdiv = 1;
2206 			*kdiv = 3;
2207 		}
2208 	}
2209 }
2210 
2211 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2212 				      u32 dco_freq, u32 ref_freq,
2213 				      int pdiv, int qdiv, int kdiv)
2214 {
2215 	u32 dco;
2216 
2217 	switch (kdiv) {
2218 	case 1:
2219 		params->kdiv = 1;
2220 		break;
2221 	case 2:
2222 		params->kdiv = 2;
2223 		break;
2224 	case 3:
2225 		params->kdiv = 4;
2226 		break;
2227 	default:
2228 		WARN(1, "Incorrect KDiv\n");
2229 	}
2230 
2231 	switch (pdiv) {
2232 	case 2:
2233 		params->pdiv = 1;
2234 		break;
2235 	case 3:
2236 		params->pdiv = 2;
2237 		break;
2238 	case 5:
2239 		params->pdiv = 4;
2240 		break;
2241 	case 7:
2242 		params->pdiv = 8;
2243 		break;
2244 	default:
2245 		WARN(1, "Incorrect PDiv\n");
2246 	}
2247 
2248 	WARN_ON(kdiv != 2 && qdiv != 1);
2249 
2250 	params->qdiv_ratio = qdiv;
2251 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2252 
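	/*
	 * The DCO ratio is encoded as an integer part plus a 15-bit fraction
	 * of the reference clock. E.g. dco_freq = 8100000 kHz with a
	 * 24000 kHz reference gives dco = 11059200, i.e. dco_integer = 0x151
	 * and dco_fraction = 0x4000 (the 8.1 GHz entry in
	 * icl_dp_combo_pll_24MHz_values below).
	 */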
2253 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2254 
2255 	params->dco_integer = dco >> 15;
2256 	params->dco_fraction = dco & 0x7fff;
2257 }
2258 
2259 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2260 {
2261 	int ref_clock = dev_priv->cdclk.hw.ref;
2262 
2263 	/*
2264 	 * For ICL+, the spec states: if reference frequency is 38.4,
2265 	 * use 19.2 because the DPLL automatically divides that by 2.
2266 	 */
2267 	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2268 		ref_clock = 19200;
2269 
2270 	return ref_clock;
2271 }
2272 
2273 static bool
2274 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2275 			struct skl_wrpll_params *wrpll_params)
2276 {
2277 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2278 	u32 afe_clock = crtc_state->port_clock * 5;
2279 	u32 ref_clock;
2280 	u32 dco_min = 7998000;
2281 	u32 dco_max = 10000000;
2282 	u32 dco_mid = (dco_min + dco_max) / 2;
2283 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2284 					 18, 20, 24, 28, 30, 32,  36,  40,
2285 					 42, 44, 48, 50, 52, 54,  56,  60,
2286 					 64, 66, 68, 70, 72, 76,  78,  80,
2287 					 84, 88, 90, 92, 96, 98, 100, 102,
2288 					  3,  5,  7,  9, 15, 21 };
2289 	u32 dco, best_dco = 0, dco_centrality = 0;
2290 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2291 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2292 
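	/*
	 * Pick the divider that puts the DCO closest to the middle of its
	 * range. E.g. a 148500 kHz HDMI TMDS clock gives afe_clock =
	 * 742500 kHz; only divider 12 lands inside [dco_min, dco_max], so
	 * best_dco = 8910000 kHz.
	 */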
2293 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2294 		dco = afe_clock * dividers[d];
2295 
2296 		if ((dco <= dco_max) && (dco >= dco_min)) {
2297 			dco_centrality = abs(dco - dco_mid);
2298 
2299 			if (dco_centrality < best_dco_centrality) {
2300 				best_dco_centrality = dco_centrality;
2301 				best_div = dividers[d];
2302 				best_dco = dco;
2303 			}
2304 		}
2305 	}
2306 
2307 	if (best_div == 0)
2308 		return false;
2309 
2310 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2311 
2312 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2313 
2314 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2315 				  pdiv, qdiv, kdiv);
2316 
2317 	return true;
2318 }
2319 
2320 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2321 {
2322 	u32 cfgcr0, cfgcr1;
2323 	struct skl_wrpll_params wrpll_params = { 0, };
2324 
2325 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2326 
2327 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2328 		return false;
2329 
2330 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2331 		wrpll_params.dco_integer;
2332 
2333 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2334 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2335 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2336 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2337 		DPLL_CFGCR1_CENTRAL_FREQ;
2338 
2339 	memset(&crtc_state->dpll_hw_state, 0,
2340 	       sizeof(crtc_state->dpll_hw_state));
2341 
2342 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2343 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2344 	return true;
2345 }
2346 
2347 static bool
2348 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2349 {
2350 	u32 cfgcr0;
2351 
2352 	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2353 
2354 	switch (crtc_state->port_clock / 2) {
2355 	case 81000:
2356 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2357 		break;
2358 	case 135000:
2359 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2360 		break;
2361 	case 270000:
2362 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2363 		break;
2364 		/* eDP 1.4 rates */
2365 	case 162000:
2366 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2367 		break;
2368 	case 108000:
2369 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2370 		break;
2371 	case 216000:
2372 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2373 		break;
2374 	case 324000:
2375 		/* Some SKUs may require elevated I/O voltage to support this */
2376 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2377 		break;
2378 	case 405000:
2379 		/* Some SKUs may require elevated I/O voltage to support this */
2380 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2381 		break;
2382 	}
2383 
2384 	memset(&crtc_state->dpll_hw_state, 0,
2385 	       sizeof(crtc_state->dpll_hw_state));
2386 
2387 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2388 
2389 	return true;
2390 }
2391 
2392 static bool cnl_get_dpll(struct intel_atomic_state *state,
2393 			 struct intel_crtc *crtc,
2394 			 struct intel_encoder *encoder)
2395 {
2396 	struct intel_crtc_state *crtc_state =
2397 		intel_atomic_get_new_crtc_state(state, crtc);
2398 	struct intel_shared_dpll *pll;
2399 	bool bret;
2400 
2401 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2402 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2403 		if (!bret) {
2404 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2405 			return false;
2406 		}
2407 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2408 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2409 		if (!bret) {
2410 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2411 			return false;
2412 		}
2413 	} else {
2414 		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2415 			      crtc_state->output_types);
2416 		return false;
2417 	}
2418 
2419 	pll = intel_find_shared_dpll(state, crtc,
2420 				     &crtc_state->dpll_hw_state,
2421 				     BIT(DPLL_ID_SKL_DPLL2) |
2422 				     BIT(DPLL_ID_SKL_DPLL1) |
2423 				     BIT(DPLL_ID_SKL_DPLL0));
2424 	if (!pll) {
2425 		DRM_DEBUG_KMS("No PLL selected\n");
2426 		return false;
2427 	}
2428 
2429 	intel_reference_shared_dpll(state, crtc,
2430 				    pll, &crtc_state->dpll_hw_state);
2431 
2432 	crtc_state->shared_dpll = pll;
2433 
2434 	return true;
2435 }
2436 
2437 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2438 			      const struct intel_dpll_hw_state *hw_state)
2439 {
2440 	DRM_DEBUG_KMS("dpll_hw_state: "
2441 		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2442 		      hw_state->cfgcr0,
2443 		      hw_state->cfgcr1);
2444 }
2445 
2446 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2447 	.enable = cnl_ddi_pll_enable,
2448 	.disable = cnl_ddi_pll_disable,
2449 	.get_hw_state = cnl_ddi_pll_get_hw_state,
2450 };
2451 
2452 static const struct dpll_info cnl_plls[] = {
2453 	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2454 	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2455 	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2456 	{ },
2457 };
2458 
2459 static const struct intel_dpll_mgr cnl_pll_mgr = {
2460 	.dpll_info = cnl_plls,
2461 	.get_dplls = cnl_get_dpll,
2462 	.put_dplls = intel_put_dpll,
2463 	.dump_hw_state = cnl_dump_hw_state,
2464 };
2465 
2466 struct icl_combo_pll_params {
2467 	int clock;
2468 	struct skl_wrpll_params wrpll;
2469 };
2470 
2471 /*
 * These values are already adjusted: they're the bits we write to the
2473  * registers, not the logical values.
2474  */
2475 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2476 	{ 540000,
2477 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2478 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2479 	{ 270000,
2480 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2481 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2482 	{ 162000,
2483 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2484 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 	{ 324000,
2486 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2487 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2488 	{ 216000,
2489 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2490 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2491 	{ 432000,
2492 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2493 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2494 	{ 648000,
2495 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2496 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2497 	{ 810000,
2498 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2499 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2500 };
2501 
2503 /* Also used for 38.4 MHz values. */
2504 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2505 	{ 540000,
2506 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2507 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2508 	{ 270000,
2509 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2510 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2511 	{ 162000,
2512 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2513 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2514 	{ 324000,
2515 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2516 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2517 	{ 216000,
2518 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2519 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2520 	{ 432000,
2521 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2522 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2523 	{ 648000,
2524 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2525 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2526 	{ 810000,
2527 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2528 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2529 };
2530 
2531 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2532 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2533 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2534 };
2535 
2536 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2537 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2538 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2539 };
2540 
2541 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2542 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2543 	/* the following params are unused */
2544 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2545 };
2546 
2547 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2548 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2549 	/* the following params are unused */
2550 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2551 };
2552 
2553 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2554 				  struct skl_wrpll_params *pll_params)
2555 {
2556 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2557 	const struct icl_combo_pll_params *params =
2558 		dev_priv->cdclk.hw.ref == 24000 ?
2559 		icl_dp_combo_pll_24MHz_values :
2560 		icl_dp_combo_pll_19_2MHz_values;
2561 	int clock = crtc_state->port_clock;
2562 	int i;
2563 
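	/* both tables list the same link rates, so either ARRAY_SIZE works here */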
2564 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2565 		if (clock == params[i].clock) {
2566 			*pll_params = params[i].wrpll;
2567 			return true;
2568 		}
2569 	}
2570 
2571 	MISSING_CASE(clock);
2572 	return false;
2573 }
2574 
2575 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2576 			     struct skl_wrpll_params *pll_params)
2577 {
2578 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2579 
2580 	if (INTEL_GEN(dev_priv) >= 12) {
2581 		switch (dev_priv->cdclk.hw.ref) {
2582 		default:
2583 			MISSING_CASE(dev_priv->cdclk.hw.ref);
2584 			/* fall-through */
2585 		case 19200:
2586 		case 38400:
2587 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2588 			break;
2589 		case 24000:
2590 			*pll_params = tgl_tbt_pll_24MHz_values;
2591 			break;
2592 		}
2593 	} else {
2594 		switch (dev_priv->cdclk.hw.ref) {
2595 		default:
2596 			MISSING_CASE(dev_priv->cdclk.hw.ref);
2597 			/* fall-through */
2598 		case 19200:
2599 		case 38400:
2600 			*pll_params = icl_tbt_pll_19_2MHz_values;
2601 			break;
2602 		case 24000:
2603 			*pll_params = icl_tbt_pll_24MHz_values;
2604 			break;
2605 		}
2606 	}
2607 
2608 	return true;
2609 }
2610 
2611 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2612 				struct intel_encoder *encoder,
2613 				struct intel_dpll_hw_state *pll_state)
2614 {
2615 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2616 	u32 cfgcr0, cfgcr1;
2617 	struct skl_wrpll_params pll_params = { 0 };
2618 	bool ret;
2619 
2620 	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2621 							encoder->port)))
2622 		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2623 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2624 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2625 		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2626 	else
2627 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2628 
2629 	if (!ret)
2630 		return false;
2631 
2632 	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2633 		 pll_params.dco_integer;
2634 
2635 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2636 		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2637 		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2638 		 DPLL_CFGCR1_PDIV(pll_params.pdiv);
2639 
2640 	if (INTEL_GEN(dev_priv) >= 12)
2641 		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2642 	else
2643 		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2644 
2645 	memset(pll_state, 0, sizeof(*pll_state));
2646 
2647 	pll_state->cfgcr0 = cfgcr0;
2648 	pll_state->cfgcr1 = cfgcr1;
2649 
2650 	return true;
2651 }
2652 
2654 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2655 {
2656 	return id - DPLL_ID_ICL_MGPLL1;
2657 }
2658 
2659 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2660 {
2661 	return tc_port + DPLL_ID_ICL_MGPLL1;
2662 }
2663 
2664 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2665 				     u32 *target_dco_khz,
2666 				     struct intel_dpll_hw_state *state,
2667 				     bool is_dkl)
2668 {
2669 	u32 dco_min_freq, dco_max_freq;
2670 	int div1_vals[] = {7, 5, 3, 2};
2671 	unsigned int i;
2672 	int div2;
2673 
2674 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2675 	dco_max_freq = is_dp ? 8100000 : 10000000;
2676 
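	/*
	 * For DP both limits are 8.1 GHz, so the DCO must land exactly there:
	 * e.g. HBR (clock_khz = 270000) ends up with div1 = 3, div2 = 2,
	 * since 3 * 2 * 270000 * 5 = 8100000.
	 */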
2677 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2678 		int div1 = div1_vals[i];
2679 
2680 		for (div2 = 10; div2 > 0; div2--) {
2681 			int dco = div1 * div2 * clock_khz * 5;
2682 			int a_divratio, tlinedrv, inputsel;
2683 			u32 hsdiv;
2684 
2685 			if (dco < dco_min_freq || dco > dco_max_freq)
2686 				continue;
2687 
2688 			if (div2 >= 2) {
2689 				/*
				 * Note: a_divratio doesn't match the TGL BSpec
				 * algorithm, but it matches the hardcoded values
				 * and works on hardware, at least for DP alt-mode.
2693 				 */
2694 				a_divratio = is_dp ? 10 : 5;
2695 				tlinedrv = is_dkl ? 1 : 2;
2696 			} else {
2697 				a_divratio = 5;
2698 				tlinedrv = 0;
2699 			}
2700 			inputsel = is_dp ? 0 : 1;
2701 
2702 			switch (div1) {
2703 			default:
2704 				MISSING_CASE(div1);
2705 				/* fall through */
2706 			case 2:
2707 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2708 				break;
2709 			case 3:
2710 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2711 				break;
2712 			case 5:
2713 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2714 				break;
2715 			case 7:
2716 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2717 				break;
2718 			}
2719 
2720 			*target_dco_khz = dco;
2721 
2722 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2723 
2724 			state->mg_clktop2_coreclkctl1 =
2725 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2726 
2727 			state->mg_clktop2_hsclkctl =
2728 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2729 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2730 				hsdiv |
2731 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2732 
2733 			return true;
2734 		}
2735 	}
2736 
2737 	return false;
2738 }
2739 
2740 /*
2741  * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation; that's why it looks so different.
2743  */
2744 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2745 				  struct intel_dpll_hw_state *pll_state)
2746 {
2747 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2748 	int refclk_khz = dev_priv->cdclk.hw.ref;
2749 	int clock = crtc_state->port_clock;
2750 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2751 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2752 	u32 prop_coeff, int_coeff;
2753 	u32 tdc_targetcnt, feedfwgain;
2754 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2755 	u64 tmp;
2756 	bool use_ssc = false;
2757 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
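	/* gen12+ Type-C ports use Dekel (DKL) PHY PLLs, ICL uses MG PLLs */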
2758 	bool is_dkl = INTEL_GEN(dev_priv) >= 12;
2759 
2760 	memset(pll_state, 0, sizeof(*pll_state));
2761 
2762 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2763 				      pll_state, is_dkl)) {
2764 		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
2765 		return false;
2766 	}
2767 
2768 	m1div = 2;
2769 	m2div_int = dco_khz / (refclk_khz * m1div);
2770 	if (m2div_int > 255) {
2771 		if (!is_dkl) {
2772 			m1div = 4;
2773 			m2div_int = dco_khz / (refclk_khz * m1div);
2774 		}
2775 
2776 		if (m2div_int > 255) {
2777 			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2778 				      clock);
2779 			return false;
2780 		}
2781 	}
2782 	m2div_rem = dco_khz % (refclk_khz * m1div);
2783 
2784 	tmp = (u64)m2div_rem * (1 << 22);
2785 	do_div(tmp, refclk_khz * m1div);
2786 	m2div_frac = tmp;
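	/*
	 * E.g. dco_khz = 8100000 with a 24000 kHz refclk and m1div = 2 gives
	 * m2div_int = 168, m2div_rem = 36000 and m2div_frac = 0x300000.
	 */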
2787 
2788 	switch (refclk_khz) {
2789 	case 19200:
2790 		iref_ndiv = 1;
2791 		iref_trim = 28;
2792 		iref_pulse_w = 1;
2793 		break;
2794 	case 24000:
2795 		iref_ndiv = 1;
2796 		iref_trim = 25;
2797 		iref_pulse_w = 2;
2798 		break;
2799 	case 38400:
2800 		iref_ndiv = 2;
2801 		iref_trim = 28;
2802 		iref_pulse_w = 1;
2803 		break;
2804 	default:
2805 		MISSING_CASE(refclk_khz);
2806 		return false;
2807 	}
2808 
2809 	/*
2810 	 * tdc_res = 0.000003
2811 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2812 	 *
2813 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2814 	 * was supposed to be a division, but we rearranged the operations of
2815 	 * the formula to avoid early divisions so we don't multiply the
2816 	 * rounding errors.
2817 	 *
2818 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2819 	 * we also rearrange to work with integers.
2820 	 *
2821 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2822 	 * last division by 10.
2823 	 */
2824 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
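	/* e.g. refclk_khz = 24000 yields tdc_targetcnt = 63 */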
2825 
2826 	/*
2827 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2828 	 * 32 bits. That's not a problem since we round the division down
2829 	 * anyway.
2830 	 */
2831 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2832 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2833 
2834 	if (dco_khz >= 9000000) {
2835 		prop_coeff = 5;
2836 		int_coeff = 10;
2837 	} else {
2838 		prop_coeff = 4;
2839 		int_coeff = 8;
2840 	}
2841 
2842 	if (use_ssc) {
2843 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2844 		do_div(tmp, refclk_khz * m1div * 10000);
2845 		ssc_stepsize = tmp;
2846 
2847 		tmp = mul_u32_u32(dco_khz, 1000);
2848 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2849 	} else {
2850 		ssc_stepsize = 0;
2851 		ssc_steplen = 0;
2852 	}
2853 	ssc_steplog = 4;
2854 
2855 	/* write pll_state calculations */
2856 	if (is_dkl) {
2857 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2858 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2859 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2860 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2861 
2862 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2863 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2864 
2865 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2866 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2867 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2868 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2869 
2870 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2871 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2872 
2873 		pll_state->mg_pll_tdc_coldst_bias =
2874 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2875 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2876 
2877 	} else {
2878 		pll_state->mg_pll_div0 =
2879 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2880 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2881 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2882 
2883 		pll_state->mg_pll_div1 =
2884 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2885 			MG_PLL_DIV1_DITHER_DIV_2 |
2886 			MG_PLL_DIV1_NDIVRATIO(1) |
2887 			MG_PLL_DIV1_FBPREDIV(m1div);
2888 
2889 		pll_state->mg_pll_lf =
2890 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2891 			MG_PLL_LF_AFCCNTSEL_512 |
2892 			MG_PLL_LF_GAINCTRL(1) |
2893 			MG_PLL_LF_INT_COEFF(int_coeff) |
2894 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2895 
2896 		pll_state->mg_pll_frac_lock =
2897 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2898 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2899 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2900 			MG_PLL_FRAC_LOCK_DCODITHEREN |
2901 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2902 		if (use_ssc || m2div_rem > 0)
2903 			pll_state->mg_pll_frac_lock |=
2904 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2905 
2906 		pll_state->mg_pll_ssc =
2907 			(use_ssc ? MG_PLL_SSC_EN : 0) |
2908 			MG_PLL_SSC_TYPE(2) |
2909 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2910 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
2911 			MG_PLL_SSC_FLLEN |
2912 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2913 
2914 		pll_state->mg_pll_tdc_coldst_bias =
2915 			MG_PLL_TDC_COLDST_COLDSTART |
2916 			MG_PLL_TDC_COLDST_IREFINT_EN |
2917 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2918 			MG_PLL_TDC_TDCOVCCORR_EN |
2919 			MG_PLL_TDC_TDCSEL(3);
2920 
2921 		pll_state->mg_pll_bias =
2922 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
2923 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2924 			MG_PLL_BIAS_BIAS_BONUS(10) |
2925 			MG_PLL_BIAS_BIASCAL_EN |
2926 			MG_PLL_BIAS_CTRIM(12) |
2927 			MG_PLL_BIAS_VREF_RDAC(4) |
2928 			MG_PLL_BIAS_IREFTRIM(iref_trim);
2929 
2930 		if (refclk_khz == 38400) {
2931 			pll_state->mg_pll_tdc_coldst_bias_mask =
2932 				MG_PLL_TDC_COLDST_COLDSTART;
2933 			pll_state->mg_pll_bias_mask = 0;
2934 		} else {
2935 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2936 			pll_state->mg_pll_bias_mask = -1U;
2937 		}
2938 
2939 		pll_state->mg_pll_tdc_coldst_bias &=
2940 			pll_state->mg_pll_tdc_coldst_bias_mask;
2941 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2942 	}
2943 
2944 	return true;
2945 }
2946 
2947 /**
2948  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2949  * @crtc_state: state for the CRTC to select the DPLL for
2950  * @port_dpll_id: the active @port_dpll_id to select
2951  *
2952  * Select the given @port_dpll_id instance from the DPLLs reserved for the
2953  * CRTC.
2954  */
2955 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2956 			      enum icl_port_dpll_id port_dpll_id)
2957 {
2958 	struct icl_port_dpll *port_dpll =
2959 		&crtc_state->icl_port_dplls[port_dpll_id];
2960 
2961 	crtc_state->shared_dpll = port_dpll->pll;
2962 	crtc_state->dpll_hw_state = port_dpll->hw_state;
2963 }
2964 
2965 static void icl_update_active_dpll(struct intel_atomic_state *state,
2966 				   struct intel_crtc *crtc,
2967 				   struct intel_encoder *encoder)
2968 {
2969 	struct intel_crtc_state *crtc_state =
2970 		intel_atomic_get_new_crtc_state(state, crtc);
2971 	struct intel_digital_port *primary_port;
2972 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2973 
2974 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
2975 		enc_to_mst(encoder)->primary :
2976 		enc_to_dig_port(encoder);
2977 
2978 	if (primary_port &&
2979 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
2980 	     primary_port->tc_mode == TC_PORT_LEGACY))
2981 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
2982 
2983 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
2984 }
2985 
2986 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
2987 				   struct intel_crtc *crtc,
2988 				   struct intel_encoder *encoder)
2989 {
2990 	struct intel_crtc_state *crtc_state =
2991 		intel_atomic_get_new_crtc_state(state, crtc);
2992 	struct icl_port_dpll *port_dpll =
2993 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2994 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2995 	enum port port = encoder->port;
2996 	unsigned long dpll_mask;
2997 
2998 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2999 		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
3000 
3001 		return false;
3002 	}
3003 
3004 	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
3005 		dpll_mask =
3006 			BIT(DPLL_ID_EHL_DPLL4) |
3007 			BIT(DPLL_ID_ICL_DPLL1) |
3008 			BIT(DPLL_ID_ICL_DPLL0);
3009 	else
3010 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3011 
3012 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3013 						&port_dpll->hw_state,
3014 						dpll_mask);
3015 	if (!port_dpll->pll) {
3016 		DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
3017 			      encoder->base.base.id, encoder->base.name);
3018 		return false;
3019 	}
3020 
3021 	intel_reference_shared_dpll(state, crtc,
3022 				    port_dpll->pll, &port_dpll->hw_state);
3023 
3024 	icl_update_active_dpll(state, crtc, encoder);
3025 
3026 	return true;
3027 }
3028 
3029 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3030 				 struct intel_crtc *crtc,
3031 				 struct intel_encoder *encoder)
3032 {
3033 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3034 	struct intel_crtc_state *crtc_state =
3035 		intel_atomic_get_new_crtc_state(state, crtc);
3036 	struct icl_port_dpll *port_dpll;
3037 	enum intel_dpll_id dpll_id;
3038 
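	/* reserve both the TBT PLL and the MG PHY PLL; the active one is selected later */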
3039 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3040 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
3041 		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
3042 		return false;
3043 	}
3044 
3045 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3046 						&port_dpll->hw_state,
3047 						BIT(DPLL_ID_ICL_TBTPLL));
3048 	if (!port_dpll->pll) {
3049 		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
3050 		return false;
3051 	}
3052 	intel_reference_shared_dpll(state, crtc,
3053 				    port_dpll->pll, &port_dpll->hw_state);
3054 
3056 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3057 	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
3058 		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
3059 		goto err_unreference_tbt_pll;
3060 	}
3061 
3062 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3063 							 encoder->port));
3064 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3065 						&port_dpll->hw_state,
3066 						BIT(dpll_id));
3067 	if (!port_dpll->pll) {
3068 		DRM_DEBUG_KMS("No MG PHY PLL found\n");
3069 		goto err_unreference_tbt_pll;
3070 	}
3071 	intel_reference_shared_dpll(state, crtc,
3072 				    port_dpll->pll, &port_dpll->hw_state);
3073 
3074 	icl_update_active_dpll(state, crtc, encoder);
3075 
3076 	return true;
3077 
3078 err_unreference_tbt_pll:
3079 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3080 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3081 
3082 	return false;
3083 }
3084 
3085 static bool icl_get_dplls(struct intel_atomic_state *state,
3086 			  struct intel_crtc *crtc,
3087 			  struct intel_encoder *encoder)
3088 {
3089 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3090 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3091 
3092 	if (intel_phy_is_combo(dev_priv, phy))
3093 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3094 	else if (intel_phy_is_tc(dev_priv, phy))
3095 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3096 
3097 	MISSING_CASE(phy);
3098 
3099 	return false;
3100 }
3101 
3102 static void icl_put_dplls(struct intel_atomic_state *state,
3103 			  struct intel_crtc *crtc)
3104 {
3105 	const struct intel_crtc_state *old_crtc_state =
3106 		intel_atomic_get_old_crtc_state(state, crtc);
3107 	struct intel_crtc_state *new_crtc_state =
3108 		intel_atomic_get_new_crtc_state(state, crtc);
3109 	enum icl_port_dpll_id id;
3110 
3111 	new_crtc_state->shared_dpll = NULL;
3112 
3113 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3114 		const struct icl_port_dpll *old_port_dpll =
3115 			&old_crtc_state->icl_port_dplls[id];
3116 		struct icl_port_dpll *new_port_dpll =
3117 			&new_crtc_state->icl_port_dplls[id];
3118 
3119 		new_port_dpll->pll = NULL;
3120 
3121 		if (!old_port_dpll->pll)
3122 			continue;
3123 
3124 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3125 	}
3126 }
3127 
3128 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3129 				struct intel_shared_dpll *pll,
3130 				struct intel_dpll_hw_state *hw_state)
3131 {
3132 	const enum intel_dpll_id id = pll->info->id;
3133 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3134 	intel_wakeref_t wakeref;
3135 	bool ret = false;
3136 	u32 val;
3137 
3138 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3139 						     POWER_DOMAIN_DISPLAY_CORE);
3140 	if (!wakeref)
3141 		return false;
3142 
3143 	val = I915_READ(MG_PLL_ENABLE(tc_port));
3144 	if (!(val & PLL_ENABLE))
3145 		goto out;
3146 
3147 	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
3148 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3149 
3150 	hw_state->mg_clktop2_coreclkctl1 =
3151 		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3152 	hw_state->mg_clktop2_coreclkctl1 &=
3153 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3154 
3155 	hw_state->mg_clktop2_hsclkctl =
3156 		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3157 	hw_state->mg_clktop2_hsclkctl &=
3158 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3159 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3160 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3161 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3162 
3163 	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
3164 	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
3165 	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
3166 	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
3167 	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
3168 
3169 	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
3170 	hw_state->mg_pll_tdc_coldst_bias =
3171 		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3172 
3173 	if (dev_priv->cdclk.hw.ref == 38400) {
3174 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3175 		hw_state->mg_pll_bias_mask = 0;
3176 	} else {
3177 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3178 		hw_state->mg_pll_bias_mask = -1U;
3179 	}
3180 
3181 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3182 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3183 
3184 	ret = true;
3185 out:
3186 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3187 	return ret;
3188 }
3189 
3190 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3191 				 struct intel_shared_dpll *pll,
3192 				 struct intel_dpll_hw_state *hw_state)
3193 {
3194 	const enum intel_dpll_id id = pll->info->id;
3195 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3196 	intel_wakeref_t wakeref;
3197 	bool ret = false;
3198 	u32 val;
3199 
3200 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3201 						     POWER_DOMAIN_DISPLAY_CORE);
3202 	if (!wakeref)
3203 		return false;
3204 
3205 	val = I915_READ(MG_PLL_ENABLE(tc_port));
3206 	if (!(val & PLL_ENABLE))
3207 		goto out;
3208 
3209 	/*
3210 	 * All registers read here have the same HIP_INDEX_REG even though
3211 	 * they are on different building blocks
3212 	 */
3213 	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
3214 
3215 	hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
3216 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3217 
3218 	hw_state->mg_clktop2_hsclkctl =
3219 		I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
3220 	hw_state->mg_clktop2_hsclkctl &=
3221 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3222 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3223 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3224 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3225 
3226 	hw_state->mg_clktop2_coreclkctl1 =
3227 		I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
3228 	hw_state->mg_clktop2_coreclkctl1 &=
3229 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3230 
3231 	hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
3232 	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
3233 				  DKL_PLL_DIV0_PROP_COEFF_MASK |
3234 				  DKL_PLL_DIV0_FBPREDIV_MASK |
3235 				  DKL_PLL_DIV0_FBDIV_INT_MASK);
3236 
3237 	hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
3238 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3239 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3240 
3241 	hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
3242 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3243 				 DKL_PLL_SSC_STEP_LEN_MASK |
3244 				 DKL_PLL_SSC_STEP_NUM_MASK |
3245 				 DKL_PLL_SSC_EN);
3246 
3247 	hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
3248 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3249 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3250 
3251 	hw_state->mg_pll_tdc_coldst_bias =
3252 		I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
3253 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3254 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3255 
3256 	ret = true;
3257 out:
3258 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3259 	return ret;
3260 }
3261 
3262 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3263 				 struct intel_shared_dpll *pll,
3264 				 struct intel_dpll_hw_state *hw_state,
3265 				 i915_reg_t enable_reg)
3266 {
3267 	const enum intel_dpll_id id = pll->info->id;
3268 	intel_wakeref_t wakeref;
3269 	bool ret = false;
3270 	u32 val;
3271 
3272 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3273 						     POWER_DOMAIN_DISPLAY_CORE);
3274 	if (!wakeref)
3275 		return false;
3276 
3277 	val = I915_READ(enable_reg);
3278 	if (!(val & PLL_ENABLE))
3279 		goto out;
3280 
3281 	if (INTEL_GEN(dev_priv) >= 12) {
3282 		hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
3283 		hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
3284 	} else {
3285 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3286 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
3287 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
3288 		} else {
3289 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3290 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3291 		}
3292 	}
3293 
3294 	ret = true;
3295 out:
3296 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3297 	return ret;
3298 }
3299 
3300 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3301 				   struct intel_shared_dpll *pll,
3302 				   struct intel_dpll_hw_state *hw_state)
3303 {
3304 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3305 
3306 	if (IS_ELKHARTLAKE(dev_priv) &&
3307 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3308 		enable_reg = MG_PLL_ENABLE(0);
3309 	}
3310 
3311 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3312 }
3313 
3314 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3315 				 struct intel_shared_dpll *pll,
3316 				 struct intel_dpll_hw_state *hw_state)
3317 {
3318 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3319 }
3320 
3321 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3322 			   struct intel_shared_dpll *pll)
3323 {
3324 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3325 	const enum intel_dpll_id id = pll->info->id;
3326 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3327 
3328 	if (INTEL_GEN(dev_priv) >= 12) {
3329 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3330 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3331 	} else {
3332 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3333 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3334 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3335 		} else {
3336 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3337 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3338 		}
3339 	}
3340 
3341 	I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
3342 	I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
3343 	POSTING_READ(cfgcr1_reg);
3344 }
3345 
3346 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3347 			     struct intel_shared_dpll *pll)
3348 {
3349 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3350 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3351 	u32 val;
3352 
3353 	/*
3354 	 * Some of the following registers have reserved fields, so program
3355 	 * these with RMW based on a mask. The mask can be fixed or generated
3356 	 * during the calc/readout phase if the mask depends on some other HW
3357 	 * state like refclk, see icl_calc_mg_pll_state().
3358 	 */
3359 	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3360 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3361 	val |= hw_state->mg_refclkin_ctl;
3362 	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3363 
3364 	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3365 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3366 	val |= hw_state->mg_clktop2_coreclkctl1;
3367 	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3368 
3369 	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3370 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3371 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3372 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3373 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3374 	val |= hw_state->mg_clktop2_hsclkctl;
3375 	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
3376 
3377 	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3378 	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3379 	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3380 	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3381 	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3382 
3383 	val = I915_READ(MG_PLL_BIAS(tc_port));
3384 	val &= ~hw_state->mg_pll_bias_mask;
3385 	val |= hw_state->mg_pll_bias;
3386 	I915_WRITE(MG_PLL_BIAS(tc_port), val);
3387 
3388 	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3389 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3390 	val |= hw_state->mg_pll_tdc_coldst_bias;
3391 	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3392 
3393 	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3394 }
3395 
3396 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3397 			  struct intel_shared_dpll *pll)
3398 {
3399 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3400 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3401 	u32 val;
3402 
3403 	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though they are on different building blocks
3406 	 */
3407 	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
3408 
3409 	/* All the registers are RMW */
3410 	val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
3411 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3412 	val |= hw_state->mg_refclkin_ctl;
3413 	I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
3414 
3415 	val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
3416 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3417 	val |= hw_state->mg_clktop2_coreclkctl1;
3418 	I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3419 
3420 	val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
3421 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3422 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3423 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3424 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3425 	val |= hw_state->mg_clktop2_hsclkctl;
3426 	I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3427 
3428 	val = I915_READ(DKL_PLL_DIV0(tc_port));
3429 	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
3430 		 DKL_PLL_DIV0_PROP_COEFF_MASK |
3431 		 DKL_PLL_DIV0_FBPREDIV_MASK |
3432 		 DKL_PLL_DIV0_FBDIV_INT_MASK);
3433 	val |= hw_state->mg_pll_div0;
3434 	I915_WRITE(DKL_PLL_DIV0(tc_port), val);
3435 
3436 	val = I915_READ(DKL_PLL_DIV1(tc_port));
3437 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3438 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3439 	val |= hw_state->mg_pll_div1;
3440 	I915_WRITE(DKL_PLL_DIV1(tc_port), val);
3441 
3442 	val = I915_READ(DKL_PLL_SSC(tc_port));
3443 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3444 		 DKL_PLL_SSC_STEP_LEN_MASK |
3445 		 DKL_PLL_SSC_STEP_NUM_MASK |
3446 		 DKL_PLL_SSC_EN);
3447 	val |= hw_state->mg_pll_ssc;
3448 	I915_WRITE(DKL_PLL_SSC(tc_port), val);
3449 
3450 	val = I915_READ(DKL_PLL_BIAS(tc_port));
3451 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3452 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3453 	val |= hw_state->mg_pll_bias;
3454 	I915_WRITE(DKL_PLL_BIAS(tc_port), val);
3455 
3456 	val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
3457 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3458 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3459 	val |= hw_state->mg_pll_tdc_coldst_bias;
3460 	I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3461 
3462 	POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
3463 }
3464 
3465 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3466 				 struct intel_shared_dpll *pll,
3467 				 i915_reg_t enable_reg)
3468 {
3469 	u32 val;
3470 
3471 	val = I915_READ(enable_reg);
3472 	val |= PLL_POWER_ENABLE;
3473 	I915_WRITE(enable_reg, val);
3474 
3475 	/*
3476 	 * The spec says we need to "wait" but it also says it should be
3477 	 * immediate.
3478 	 */
3479 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3480 		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3481 }
3482 
3483 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3484 			   struct intel_shared_dpll *pll,
3485 			   i915_reg_t enable_reg)
3486 {
3487 	u32 val;
3488 
3489 	val = I915_READ(enable_reg);
3490 	val |= PLL_ENABLE;
3491 	I915_WRITE(enable_reg, val);
3492 
3493 	/* Timeout is actually 600us. */
3494 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3495 		DRM_ERROR("PLL %d not locked\n", pll->info->id);
3496 }
3497 
3498 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3499 			     struct intel_shared_dpll *pll)
3500 {
3501 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3502 
3503 	if (IS_ELKHARTLAKE(dev_priv) &&
3504 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3505 		enable_reg = MG_PLL_ENABLE(0);
3506 
3507 		/*
3508 		 * We need to disable DC states when this DPLL is enabled.
3509 		 * This can be done by taking a reference on DPLL4 power
3510 		 * domain.
3511 		 */
3512 		pll->wakeref = intel_display_power_get(dev_priv,
3513 						       POWER_DOMAIN_DPLL_DC_OFF);
3514 	}
3515 
3516 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3517 
3518 	icl_dpll_write(dev_priv, pll);
3519 
3520 	/*
3521 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3522 	 * paths should already be setting the appropriate voltage, hence we do
3523 	 * nothing here.
3524 	 */
3525 
3526 	icl_pll_enable(dev_priv, pll, enable_reg);
3527 
3528 	/* DVFS post sequence would be here. See the comment above. */
3529 }
3530 
3531 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3532 			   struct intel_shared_dpll *pll)
3533 {
3534 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3535 
3536 	icl_dpll_write(dev_priv, pll);
3537 
3538 	/*
3539 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3540 	 * paths should already be setting the appropriate voltage, hence we do
3541 	 * nothing here.
3542 	 */
3543 
3544 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3545 
3546 	/* DVFS post sequence would be here. See the comment above. */
3547 }
3548 
3549 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3550 			  struct intel_shared_dpll *pll)
3551 {
3552 	i915_reg_t enable_reg =
3553 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3554 
3555 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3556 
3557 	if (INTEL_GEN(dev_priv) >= 12)
3558 		dkl_pll_write(dev_priv, pll);
3559 	else
3560 		icl_mg_pll_write(dev_priv, pll);
3561 
3562 	/*
3563 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3564 	 * paths should already be setting the appropriate voltage, hence we do
3565 	 * nothing here.
3566 	 */
3567 
3568 	icl_pll_enable(dev_priv, pll, enable_reg);
3569 
3570 	/* DVFS post sequence would be here. See the comment above. */
3571 }
3572 
3573 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3574 			    struct intel_shared_dpll *pll,
3575 			    i915_reg_t enable_reg)
3576 {
3577 	u32 val;
3578 
3579 	/* The first steps are done by intel_ddi_post_disable(). */
3580 
3581 	/*
3582 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3583 	 * paths should already be setting the appropriate voltage, hence we do
3584 	 * nothing here.
3585 	 */
3586 
3587 	val = I915_READ(enable_reg);
3588 	val &= ~PLL_ENABLE;
3589 	I915_WRITE(enable_reg, val);
3590 
3591 	/* Timeout is actually 1us. */
3592 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3593 		DRM_ERROR("PLL %d still locked\n", pll->info->id);
3594 
3595 	/* DVFS post sequence would be here. See the comment above. */
3596 
3597 	val = I915_READ(enable_reg);
3598 	val &= ~PLL_POWER_ENABLE;
3599 	I915_WRITE(enable_reg, val);
3600 
3601 	/*
3602 	 * The spec says we need to "wait" but it also says it should be
3603 	 * immediate.
3604 	 */
3605 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3606 		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3607 }
3608 
3609 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3610 			      struct intel_shared_dpll *pll)
3611 {
3612 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3613 
3614 	if (IS_ELKHARTLAKE(dev_priv) &&
3615 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3616 		enable_reg = MG_PLL_ENABLE(0);
3617 		icl_pll_disable(dev_priv, pll, enable_reg);
3618 
3619 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3620 					pll->wakeref);
3621 		return;
3622 	}
3623 
3624 	icl_pll_disable(dev_priv, pll, enable_reg);
3625 }
3626 
3627 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3628 			    struct intel_shared_dpll *pll)
3629 {
3630 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3631 }
3632 
3633 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3634 			   struct intel_shared_dpll *pll)
3635 {
3636 	i915_reg_t enable_reg =
3637 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3638 
3639 	icl_pll_disable(dev_priv, pll, enable_reg);
3640 }
3641 
3642 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3643 			      const struct intel_dpll_hw_state *hw_state)
3644 {
3645 	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3646 		      "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
3647 		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3648 		      "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
3649 		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3650 		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3651 		      hw_state->cfgcr0, hw_state->cfgcr1,
3652 		      hw_state->mg_refclkin_ctl,
3653 		      hw_state->mg_clktop2_coreclkctl1,
3654 		      hw_state->mg_clktop2_hsclkctl,
3655 		      hw_state->mg_pll_div0,
3656 		      hw_state->mg_pll_div1,
3657 		      hw_state->mg_pll_lf,
3658 		      hw_state->mg_pll_frac_lock,
3659 		      hw_state->mg_pll_ssc,
3660 		      hw_state->mg_pll_bias,
3661 		      hw_state->mg_pll_tdc_coldst_bias);
3662 }
3663 
3664 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3665 	.enable = combo_pll_enable,
3666 	.disable = combo_pll_disable,
3667 	.get_hw_state = combo_pll_get_hw_state,
3668 };
3669 
3670 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3671 	.enable = tbt_pll_enable,
3672 	.disable = tbt_pll_disable,
3673 	.get_hw_state = tbt_pll_get_hw_state,
3674 };
3675 
3676 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3677 	.enable = mg_pll_enable,
3678 	.disable = mg_pll_disable,
3679 	.get_hw_state = mg_pll_get_hw_state,
3680 };
3681 
3682 static const struct dpll_info icl_plls[] = {
3683 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3684 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3685 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3686 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3687 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3688 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3689 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3690 	{ },
3691 };
3692 
3693 static const struct intel_dpll_mgr icl_pll_mgr = {
3694 	.dpll_info = icl_plls,
3695 	.get_dplls = icl_get_dplls,
3696 	.put_dplls = icl_put_dplls,
3697 	.update_active_dpll = icl_update_active_dpll,
3698 	.dump_hw_state = icl_dump_hw_state,
3699 };
3700 
3701 static const struct dpll_info ehl_plls[] = {
3702 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3703 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3704 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
3705 	{ },
3706 };
3707 
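/*
 * Unlike the ICL/TGL managers, no update_active_dpll hook is provided here:
 * EHL has no Type-C ports, so there is never a TBT vs. MG/DKL port PLL to
 * switch between.
 */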
3708 static const struct intel_dpll_mgr ehl_pll_mgr = {
3709 	.dpll_info = ehl_plls,
3710 	.get_dplls = icl_get_dplls,
3711 	.put_dplls = icl_put_dplls,
3712 	.dump_hw_state = icl_dump_hw_state,
3713 };
3714 
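/*
 * TGL Type-C ("TC") PLLs live in the Dekel PHY. They reuse the MG PLL
 * enable/disable sequence (which dispatches to dkl_pll_write() on gen12+),
 * but need their own hardware state readout.
 */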
3715 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
3716 	.enable = mg_pll_enable,
3717 	.disable = mg_pll_disable,
3718 	.get_hw_state = dkl_pll_get_hw_state,
3719 };
3720 
3721 static const struct dpll_info tgl_plls[] = {
3722 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3723 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3724 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3725 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3726 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3727 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3728 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3729 	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
3730 	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
3731 	{ },
3732 };
3733 
3734 static const struct intel_dpll_mgr tgl_pll_mgr = {
3735 	.dpll_info = tgl_plls,
3736 	.get_dplls = icl_get_dplls,
3737 	.put_dplls = icl_put_dplls,
3738 	.update_active_dpll = icl_update_active_dpll,
3739 	.dump_hw_state = icl_dump_hw_state,
3740 };
3741 
3742 /**
3743  * intel_shared_dpll_init - Initialize shared DPLLs
3744  * @dev: drm device
3745  *
3746  * Initialize shared DPLLs for @dev.
3747  */
3748 void intel_shared_dpll_init(struct drm_device *dev)
3749 {
3750 	struct drm_i915_private *dev_priv = to_i915(dev);
3751 	const struct intel_dpll_mgr *dpll_mgr = NULL;
3752 	const struct dpll_info *dpll_info;
3753 	int i;
3754 
3755 	if (INTEL_GEN(dev_priv) >= 12)
3756 		dpll_mgr = &tgl_pll_mgr;
3757 	else if (IS_ELKHARTLAKE(dev_priv))
3758 		dpll_mgr = &ehl_pll_mgr;
3759 	else if (INTEL_GEN(dev_priv) >= 11)
3760 		dpll_mgr = &icl_pll_mgr;
3761 	else if (IS_CANNONLAKE(dev_priv))
3762 		dpll_mgr = &cnl_pll_mgr;
3763 	else if (IS_GEN9_BC(dev_priv))
3764 		dpll_mgr = &skl_pll_mgr;
3765 	else if (IS_GEN9_LP(dev_priv))
3766 		dpll_mgr = &bxt_pll_mgr;
3767 	else if (HAS_DDI(dev_priv))
3768 		dpll_mgr = &hsw_pll_mgr;
3769 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3770 		dpll_mgr = &pch_pll_mgr;
3771 
3772 	if (!dpll_mgr) {
3773 		dev_priv->num_shared_dpll = 0;
3774 		return;
3775 	}
3776 
3777 	dpll_info = dpll_mgr->dpll_info;
3778 
3779 	for (i = 0; dpll_info[i].name; i++) {
3780 		WARN_ON(i != dpll_info[i].id);
3781 		dev_priv->shared_dplls[i].info = &dpll_info[i];
3782 	}
3783 
3784 	dev_priv->dpll_mgr = dpll_mgr;
3785 	dev_priv->num_shared_dpll = i;
3786 	mutex_init(&dev_priv->dpll_lock);
3787 
3788 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3789 }
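
/*
 * Hooking up a new platform follows the same table-driven pattern as the
 * managers above: define a dpll_info table and an intel_dpll_mgr, then
 * select it in intel_shared_dpll_init(). A minimal sketch, with "xyz" as a
 * purely hypothetical platform name (the hooks shown are the existing ICL
 * ones):
 *
 *	static const struct dpll_info xyz_plls[] = {
 *		{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
 *		{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
 *		{ },
 *	};
 *
 *	static const struct intel_dpll_mgr xyz_pll_mgr = {
 *		.dpll_info = xyz_plls,
 *		.get_dplls = icl_get_dplls,
 *		.put_dplls = icl_put_dplls,
 *		.dump_hw_state = icl_dump_hw_state,
 *	};
 *
 * Note that intel_shared_dpll_init() expects table index i to match
 * dpll_info[i].id (see the WARN_ON above).
 */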
3790 
3791 /**
3792  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3793  * @state: atomic state
3794  * @crtc: CRTC to reserve DPLLs for
3795  * @encoder: encoder
3796  *
3797  * This function reserves all required DPLLs for the given CRTC and encoder
3798  * combination in the current atomic commit @state and the new @crtc atomic
3799  * state.
3800  *
3801  * The new configuration in the atomic commit @state is made effective by
3802  * calling intel_shared_dpll_swap_state().
3803  *
3804  * The reserved DPLLs should be released by calling
3805  * intel_release_shared_dplls().
3806  *
3807  * Returns:
3808  * True if all required DPLLs were successfully reserved.
3809  */
3810 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3811 				struct intel_crtc *crtc,
3812 				struct intel_encoder *encoder)
3813 {
3814 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3815 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3816 
3817 	if (WARN_ON(!dpll_mgr))
3818 		return false;
3819 
3820 	return dpll_mgr->get_dplls(state, crtc, encoder);
3821 }
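
/*
 * A minimal usage sketch (illustrative only, not the actual call site): a
 * platform clock-computation hook running during atomic check could reserve
 * its DPLLs and bail out on failure like so:
 *
 *	if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
 *		DRM_DEBUG_KMS("failed to reserve DPLLs for CRTC %d\n",
 *			      crtc->base.base.id);
 *		return -EINVAL;
 *	}
 *
 * The reservation only becomes visible to the hardware-facing state once
 * intel_shared_dpll_swap_state() runs during the commit phase.
 */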
3822 
3823 /**
3824  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3825  * @state: atomic state
3826  * @crtc: CRTC from which the DPLLs are to be released
3827  *
3828  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3829  * from the current atomic commit @state and the old @crtc atomic state.
3830  *
3831  * The new configuration in the atomic commit @state is made effective by
3832  * calling intel_shared_dpll_swap_state().
3833  */
3834 void intel_release_shared_dplls(struct intel_atomic_state *state,
3835 				struct intel_crtc *crtc)
3836 {
3837 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3838 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3839 
3840 	/*
3841 	 * FIXME: this function is called for every platform having a
3842 	 * compute_clock hook, even though the platform doesn't yet support
3843 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
3844 	 * called on those.
3845 	 */
3846 	if (!dpll_mgr)
3847 		return;
3848 
3849 	dpll_mgr->put_dplls(state, crtc);
3850 }
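
/*
 * Sketch of the matching release (illustrative only): when a CRTC is being
 * disabled or needs a different clock configuration, the atomic check code
 * drops the reservation taken for the old state with:
 *
 *	intel_release_shared_dplls(state, crtc);
 *
 * and may then call intel_reserve_shared_dplls() again for the new one.
 */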
3851 
3852 /**
3853  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3854  * @state: atomic state
3855  * @crtc: the CRTC for which to update the active DPLL
3856  * @encoder: encoder determining the type of port DPLL
3857  *
3858  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3859  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3860  * DPLL selected will be based on the current mode of the encoder's port.
3861  */
3862 void intel_update_active_dpll(struct intel_atomic_state *state,
3863 			      struct intel_crtc *crtc,
3864 			      struct intel_encoder *encoder)
3865 {
3866 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3867 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3868 
3869 	if (WARN_ON(!dpll_mgr))
3870 		return;
3871 
3872 	dpll_mgr->update_active_dpll(state, crtc, encoder);
3873 }
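
/*
 * Sketch (illustrative only): for a Type-C port, once the port's mode is
 * known (TBT vs. DP alternate/legacy), the encoder code can pick the
 * matching PLL from the previously reserved port DPLLs with:
 *
 *	intel_update_active_dpll(state, crtc, encoder);
 */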
3874 
3875 /**
3876  * intel_dpll_dump_hw_state - write hw_state to dmesg
3877  * @dev_priv: i915 drm device
3878  * @hw_state: hw state to be written to the log
3879  *
3880  * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3881  */
3882 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3883 			      const struct intel_dpll_hw_state *hw_state)
3884 {
3885 	if (dev_priv->dpll_mgr) {
3886 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3887 	} else {
3888 		/* fallback for platforms that don't use the shared dpll
3889 		 * infrastructure
3890 		 */
3891 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3892 			      "fp0: 0x%x, fp1: 0x%x\n",
3893 			      hw_state->dpll,
3894 			      hw_state->dpll_md,
3895 			      hw_state->fp0,
3896 			      hw_state->fp1);
3897 	}
3898 }
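
/*
 * Sketch (illustrative only): state-checker or debug code holding a shared
 * DPLL pointer can dump its last known hardware state with something like
 * the call below, assuming the usual struct intel_shared_dpll layout where
 * ->state.hw_state holds the last programmed register values:
 *
 *	intel_dpll_dump_hw_state(dev_priv, &pll->state.hw_state);
 *
 * Which fields of the dump are meaningful depends on the platform hook.
 */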
3899