1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_dpio_phy.h"
25 #include "intel_dpll_mgr.h"
26 #include "intel_drv.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
39  * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
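
/*
 * Illustrative sketch only (not a literal excerpt from this file): a
 * platform's ->get_dplls() hook below typically stages a PLL for a CRTC
 * along these lines, the divider computation being the platform-specific
 * part:
 *
 *	crtc_state->dpll_hw_state = <computed dividers>;
 *	pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
 *				     range_min, range_max);
 *	if (!pll)
 *		return false;
 *	intel_reference_shared_dpll(state, crtc, pll,
 *				    &crtc_state->dpll_hw_state);
 *	crtc_state->shared_dpll = pll;
 *
 * Nothing touches the hardware at this point; the staged crtc_mask and
 * hw_state only become the PLL's ->state once intel_shared_dpll_swap_state()
 * runs during the commit phase.
 */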
47 
48 static void
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 				  struct intel_shared_dpll_state *shared_dpll)
51 {
52 	enum intel_dpll_id i;
53 
54 	/* Copy shared dpll state */
55 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
57 
58 		shared_dpll[i] = pll->state;
59 	}
60 }
61 
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
64 {
65 	struct intel_atomic_state *state = to_intel_atomic_state(s);
66 
67 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
68 
69 	if (!state->dpll_set) {
70 		state->dpll_set = true;
71 
72 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
73 						  state->shared_dpll);
74 	}
75 
76 	return state->shared_dpll;
77 }
78 
79 /**
80  * intel_get_shared_dpll_by_id - get a DPLL given its id
81  * @dev_priv: i915 device instance
82  * @id: pll id
83  *
84  * Returns:
85  * A pointer to the DPLL with @id
86  */
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 			    enum intel_dpll_id id)
90 {
91 	return &dev_priv->shared_dplls[id];
92 }
93 
94 /**
95  * intel_get_shared_dpll_id - get the id of a DPLL
96  * @dev_priv: i915 device instance
97  * @pll: the DPLL
98  *
99  * Returns:
100  * The id of @pll
101  */
102 enum intel_dpll_id
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 			 struct intel_shared_dpll *pll)
105 {
106 	if (WARN_ON(pll < dev_priv->shared_dplls ||
107 		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
108 		return -1;
109 
110 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
111 }
112 
113 /* For ILK+ */
114 void assert_shared_dpll(struct drm_i915_private *dev_priv,
115 			struct intel_shared_dpll *pll,
116 			bool state)
117 {
118 	bool cur_state;
119 	struct intel_dpll_hw_state hw_state;
120 
121 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
122 		return;
123 
124 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
125 	I915_STATE_WARN(cur_state != state,
126 	     "%s assertion failure (expected %s, current %s)\n",
127 			pll->info->name, onoff(state), onoff(cur_state));
128 }
129 
130 /**
131  * intel_prepare_shared_dpll - call a dpll's prepare hook
132  * @crtc_state: CRTC, and its state, which has a shared dpll
133  *
134  * This calls the PLL's prepare hook if it has one and if the PLL is not
135  * already enabled. The prepare hook is platform specific.
136  */
137 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
138 {
139 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
140 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
141 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
142 
143 	if (WARN_ON(pll == NULL))
144 		return;
145 
146 	mutex_lock(&dev_priv->dpll_lock);
147 	WARN_ON(!pll->state.crtc_mask);
148 	if (!pll->active_mask) {
149 		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
150 		WARN_ON(pll->on);
151 		assert_shared_dpll_disabled(dev_priv, pll);
152 
153 		pll->info->funcs->prepare(dev_priv, pll);
154 	}
155 	mutex_unlock(&dev_priv->dpll_lock);
156 }
157 
158 /**
159  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
160  * @crtc_state: CRTC, and its state, which has a shared DPLL
161  *
162  * Enable the shared DPLL used by the CRTC in @crtc_state.
163  */
164 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
165 {
166 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
167 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
168 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
169 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
170 	unsigned int old_mask;
171 
172 	if (WARN_ON(pll == NULL))
173 		return;
174 
175 	mutex_lock(&dev_priv->dpll_lock);
176 	old_mask = pll->active_mask;
177 
178 	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
179 	    WARN_ON(pll->active_mask & crtc_mask))
180 		goto out;
181 
182 	pll->active_mask |= crtc_mask;
183 
184 	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
185 		      pll->info->name, pll->active_mask, pll->on,
186 		      crtc->base.base.id);
187 
188 	if (old_mask) {
189 		WARN_ON(!pll->on);
190 		assert_shared_dpll_enabled(dev_priv, pll);
191 		goto out;
192 	}
193 	WARN_ON(pll->on);
194 
195 	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
196 	pll->info->funcs->enable(dev_priv, pll);
197 	pll->on = true;
198 
199 out:
200 	mutex_unlock(&dev_priv->dpll_lock);
201 }
202 
203 /**
204  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
205  * @crtc_state: CRTC, and its state, which has a shared DPLL
206  *
207  * Disable the shared DPLL used by the CRTC in @crtc_state.
208  */
209 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
210 {
211 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
212 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
213 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
214 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
215 
216 	/* PCH only available on ILK+ */
217 	if (INTEL_GEN(dev_priv) < 5)
218 		return;
219 
220 	if (pll == NULL)
221 		return;
222 
223 	mutex_lock(&dev_priv->dpll_lock);
224 	if (WARN_ON(!(pll->active_mask & crtc_mask)))
225 		goto out;
226 
227 	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
228 		      pll->info->name, pll->active_mask, pll->on,
229 		      crtc->base.base.id);
230 
231 	assert_shared_dpll_enabled(dev_priv, pll);
232 	WARN_ON(!pll->on);
233 
234 	pll->active_mask &= ~crtc_mask;
235 	if (pll->active_mask)
236 		goto out;
237 
238 	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
239 	pll->info->funcs->disable(dev_priv, pll);
240 	pll->on = false;
241 
242 out:
243 	mutex_unlock(&dev_priv->dpll_lock);
244 }
245 
246 static struct intel_shared_dpll *
247 intel_find_shared_dpll(struct intel_atomic_state *state,
248 		       const struct intel_crtc *crtc,
249 		       const struct intel_dpll_hw_state *pll_state,
250 		       enum intel_dpll_id range_min,
251 		       enum intel_dpll_id range_max)
252 {
253 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
254 	struct intel_shared_dpll *pll, *unused_pll = NULL;
255 	struct intel_shared_dpll_state *shared_dpll;
256 	enum intel_dpll_id i;
257 
258 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
259 
260 	for (i = range_min; i <= range_max; i++) {
261 		pll = &dev_priv->shared_dplls[i];
262 
263 		/* Only consider PLLs already in use; remember the first unused one */
264 		if (shared_dpll[i].crtc_mask == 0) {
265 			if (!unused_pll)
266 				unused_pll = pll;
267 			continue;
268 		}
269 
270 		if (memcmp(pll_state,
271 			   &shared_dpll[i].hw_state,
272 			   sizeof(*pll_state)) == 0) {
273 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
274 				      crtc->base.base.id, crtc->base.name,
275 				      pll->info->name,
276 				      shared_dpll[i].crtc_mask,
277 				      pll->active_mask);
278 			return pll;
279 		}
280 	}
281 
282 	/* Ok no matching timings, maybe there's a free one? */
283 	if (unused_pll) {
284 		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
285 			      crtc->base.base.id, crtc->base.name,
286 			      unused_pll->info->name);
287 		return unused_pll;
288 	}
289 
290 	return NULL;
291 }
292 
293 static void
294 intel_reference_shared_dpll(struct intel_atomic_state *state,
295 			    const struct intel_crtc *crtc,
296 			    const struct intel_shared_dpll *pll,
297 			    const struct intel_dpll_hw_state *pll_state)
298 {
299 	struct intel_shared_dpll_state *shared_dpll;
300 	const enum intel_dpll_id id = pll->info->id;
301 
302 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
303 
304 	if (shared_dpll[id].crtc_mask == 0)
305 		shared_dpll[id].hw_state = *pll_state;
306 
307 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
308 			 pipe_name(crtc->pipe));
309 
310 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
311 }
312 
313 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
314 					  const struct intel_crtc *crtc,
315 					  const struct intel_shared_dpll *pll)
316 {
317 	struct intel_shared_dpll_state *shared_dpll;
318 
319 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
320 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
321 }
322 
323 static void intel_put_dpll(struct intel_atomic_state *state,
324 			   struct intel_crtc *crtc)
325 {
326 	const struct intel_crtc_state *old_crtc_state =
327 		intel_atomic_get_old_crtc_state(state, crtc);
328 	struct intel_crtc_state *new_crtc_state =
329 		intel_atomic_get_new_crtc_state(state, crtc);
330 
331 	new_crtc_state->shared_dpll = NULL;
332 
333 	if (!old_crtc_state->shared_dpll)
334 		return;
335 
336 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
337 }
338 
339 /**
340  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
341  * @state: atomic state
342  *
343  * This is the dpll version of drm_atomic_helper_swap_state() since the
344  * helper does not handle driver-specific global state.
345  *
346  * For consistency with atomic helpers this function does a complete swap,
347  * i.e. it also puts the current state into @state, even though there is no
348  * need for that at this moment.
349  */
350 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
351 {
352 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
353 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
354 	enum intel_dpll_id i;
355 
356 	if (!state->dpll_set)
357 		return;
358 
359 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
360 		struct intel_shared_dpll *pll =
361 			&dev_priv->shared_dplls[i];
362 
363 		swap(pll->state, shared_dpll[i]);
364 	}
365 }
366 
367 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
368 				      struct intel_shared_dpll *pll,
369 				      struct intel_dpll_hw_state *hw_state)
370 {
371 	const enum intel_dpll_id id = pll->info->id;
372 	intel_wakeref_t wakeref;
373 	u32 val;
374 
375 	wakeref = intel_display_power_get_if_enabled(dev_priv,
376 						     POWER_DOMAIN_DISPLAY_CORE);
377 	if (!wakeref)
378 		return false;
379 
380 	val = I915_READ(PCH_DPLL(id));
381 	hw_state->dpll = val;
382 	hw_state->fp0 = I915_READ(PCH_FP0(id));
383 	hw_state->fp1 = I915_READ(PCH_FP1(id));
384 
385 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
386 
387 	return val & DPLL_VCO_ENABLE;
388 }
389 
390 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
391 				 struct intel_shared_dpll *pll)
392 {
393 	const enum intel_dpll_id id = pll->info->id;
394 
395 	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
396 	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
397 }
398 
399 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
400 {
401 	u32 val;
402 	bool enabled;
403 
404 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
405 
406 	val = I915_READ(PCH_DREF_CONTROL);
407 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
408 			    DREF_SUPERSPREAD_SOURCE_MASK));
409 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
410 }
411 
412 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
413 				struct intel_shared_dpll *pll)
414 {
415 	const enum intel_dpll_id id = pll->info->id;
416 
417 	/* PCH refclock must be enabled first */
418 	ibx_assert_pch_refclk_enabled(dev_priv);
419 
420 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
421 
422 	/* Wait for the clocks to stabilize. */
423 	POSTING_READ(PCH_DPLL(id));
424 	udelay(150);
425 
426 	/* The pixel multiplier can only be updated once the
427 	 * DPLL is enabled and the clocks are stable.
428 	 *
429 	 * So write it again.
430 	 */
431 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
432 	POSTING_READ(PCH_DPLL(id));
433 	udelay(200);
434 }
435 
436 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
437 				 struct intel_shared_dpll *pll)
438 {
439 	const enum intel_dpll_id id = pll->info->id;
440 
441 	I915_WRITE(PCH_DPLL(id), 0);
442 	POSTING_READ(PCH_DPLL(id));
443 	udelay(200);
444 }
445 
446 static bool ibx_get_dpll(struct intel_atomic_state *state,
447 			 struct intel_crtc *crtc,
448 			 struct intel_encoder *encoder)
449 {
450 	struct intel_crtc_state *crtc_state =
451 		intel_atomic_get_new_crtc_state(state, crtc);
452 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
453 	struct intel_shared_dpll *pll;
454 	enum intel_dpll_id i;
455 
456 	if (HAS_PCH_IBX(dev_priv)) {
457 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
458 		i = (enum intel_dpll_id) crtc->pipe;
459 		pll = &dev_priv->shared_dplls[i];
460 
461 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
462 			      crtc->base.base.id, crtc->base.name,
463 			      pll->info->name);
464 	} else {
465 		pll = intel_find_shared_dpll(state, crtc,
466 					     &crtc_state->dpll_hw_state,
467 					     DPLL_ID_PCH_PLL_A,
468 					     DPLL_ID_PCH_PLL_B);
469 	}
470 
471 	if (!pll)
472 		return false;
473 
474 	/* reference the pll */
475 	intel_reference_shared_dpll(state, crtc,
476 				    pll, &crtc_state->dpll_hw_state);
477 
478 	crtc_state->shared_dpll = pll;
479 
480 	return true;
481 }
482 
483 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
484 			      const struct intel_dpll_hw_state *hw_state)
485 {
486 	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
487 		      "fp0: 0x%x, fp1: 0x%x\n",
488 		      hw_state->dpll,
489 		      hw_state->dpll_md,
490 		      hw_state->fp0,
491 		      hw_state->fp1);
492 }
493 
494 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
495 	.prepare = ibx_pch_dpll_prepare,
496 	.enable = ibx_pch_dpll_enable,
497 	.disable = ibx_pch_dpll_disable,
498 	.get_hw_state = ibx_pch_dpll_get_hw_state,
499 };
500 
501 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
502 			       struct intel_shared_dpll *pll)
503 {
504 	const enum intel_dpll_id id = pll->info->id;
505 
506 	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
507 	POSTING_READ(WRPLL_CTL(id));
508 	udelay(20);
509 }
510 
511 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
512 				struct intel_shared_dpll *pll)
513 {
514 	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
515 	POSTING_READ(SPLL_CTL);
516 	udelay(20);
517 }
518 
519 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
520 				  struct intel_shared_dpll *pll)
521 {
522 	const enum intel_dpll_id id = pll->info->id;
523 	u32 val;
524 
525 	val = I915_READ(WRPLL_CTL(id));
526 	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
527 	POSTING_READ(WRPLL_CTL(id));
528 }
529 
530 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
531 				 struct intel_shared_dpll *pll)
532 {
533 	u32 val;
534 
535 	val = I915_READ(SPLL_CTL);
536 	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
537 	POSTING_READ(SPLL_CTL);
538 }
539 
540 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
541 				       struct intel_shared_dpll *pll,
542 				       struct intel_dpll_hw_state *hw_state)
543 {
544 	const enum intel_dpll_id id = pll->info->id;
545 	intel_wakeref_t wakeref;
546 	u32 val;
547 
548 	wakeref = intel_display_power_get_if_enabled(dev_priv,
549 						     POWER_DOMAIN_DISPLAY_CORE);
550 	if (!wakeref)
551 		return false;
552 
553 	val = I915_READ(WRPLL_CTL(id));
554 	hw_state->wrpll = val;
555 
556 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
557 
558 	return val & WRPLL_PLL_ENABLE;
559 }
560 
561 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
562 				      struct intel_shared_dpll *pll,
563 				      struct intel_dpll_hw_state *hw_state)
564 {
565 	intel_wakeref_t wakeref;
566 	u32 val;
567 
568 	wakeref = intel_display_power_get_if_enabled(dev_priv,
569 						     POWER_DOMAIN_DISPLAY_CORE);
570 	if (!wakeref)
571 		return false;
572 
573 	val = I915_READ(SPLL_CTL);
574 	hw_state->spll = val;
575 
576 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
577 
578 	return val & SPLL_PLL_ENABLE;
579 }
580 
581 #define LC_FREQ 2700
582 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
583 
584 #define P_MIN 2
585 #define P_MAX 64
586 #define P_INC 2
587 
588 /* Constraints for PLL good behavior */
589 #define REF_MIN 48
590 #define REF_MAX 400
591 #define VCO_MIN 2400
592 #define VCO_MAX 4800
593 
594 struct hsw_wrpll_rnp {
595 	unsigned p, n2, r2;
596 };
597 
598 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
599 {
600 	unsigned budget;
601 
602 	switch (clock) {
603 	case 25175000:
604 	case 25200000:
605 	case 27000000:
606 	case 27027000:
607 	case 37762500:
608 	case 37800000:
609 	case 40500000:
610 	case 40541000:
611 	case 54000000:
612 	case 54054000:
613 	case 59341000:
614 	case 59400000:
615 	case 72000000:
616 	case 74176000:
617 	case 74250000:
618 	case 81000000:
619 	case 81081000:
620 	case 89012000:
621 	case 89100000:
622 	case 108000000:
623 	case 108108000:
624 	case 111264000:
625 	case 111375000:
626 	case 148352000:
627 	case 148500000:
628 	case 162000000:
629 	case 162162000:
630 	case 222525000:
631 	case 222750000:
632 	case 296703000:
633 	case 297000000:
634 		budget = 0;
635 		break;
636 	case 233500000:
637 	case 245250000:
638 	case 247750000:
639 	case 253250000:
640 	case 298000000:
641 		budget = 1500;
642 		break;
643 	case 169128000:
644 	case 169500000:
645 	case 179500000:
646 	case 202000000:
647 		budget = 2000;
648 		break;
649 	case 256250000:
650 	case 262500000:
651 	case 270000000:
652 	case 272500000:
653 	case 273750000:
654 	case 280750000:
655 	case 281250000:
656 	case 286000000:
657 	case 291750000:
658 		budget = 4000;
659 		break;
660 	case 267250000:
661 	case 268500000:
662 		budget = 5000;
663 		break;
664 	default:
665 		budget = 1000;
666 		break;
667 	}
668 
669 	return budget;
670 }
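
/*
 * Note: the value returned above is interpreted as a PPM tolerance by
 * hsw_wrpll_update_rnp() below; 0 demands an exact match for the listed
 * standard TMDS rates, while clocks not in the table fall back to 1000 PPM.
 */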
671 
672 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
673 				 unsigned int r2, unsigned int n2,
674 				 unsigned int p,
675 				 struct hsw_wrpll_rnp *best)
676 {
677 	u64 a, b, c, d, diff, diff_best;
678 
679 	/* No best (r,n,p) yet */
680 	if (best->p == 0) {
681 		best->p = p;
682 		best->n2 = n2;
683 		best->r2 = r2;
684 		return;
685 	}
686 
687 	/*
688 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
689 	 * freq2k.
690 	 *
691 	 * delta = 1e6 *
692 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
693 	 *	   freq2k;
694 	 *
695 	 * and we would like delta <= budget.
696 	 *
697 	 * If the discrepancy is above the PPM-based budget, always prefer to
698 	 * improve upon the previous solution.  However, if you're within the
699 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
700 	 */
701 	a = freq2k * budget * p * r2;
702 	b = freq2k * budget * best->p * best->r2;
703 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
704 	diff_best = abs_diff(freq2k * best->p * best->r2,
705 			     LC_FREQ_2K * best->n2);
706 	c = 1000000 * diff;
707 	d = 1000000 * diff_best;
708 
709 	if (a < c && b < d) {
710 		/* If both are above the budget, pick the closer */
711 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
712 			best->p = p;
713 			best->n2 = n2;
714 			best->r2 = r2;
715 		}
716 	} else if (a >= c && b < d) {
717 		/* A is within the budget but B is not: take the new values. */
718 		best->p = p;
719 		best->n2 = n2;
720 		best->r2 = r2;
721 	} else if (a >= c && b >= d) {
722 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
723 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
724 			best->p = p;
725 			best->n2 = n2;
726 			best->r2 = r2;
727 		}
728 	}
729 	/* Otherwise a < c && b >= d, do nothing */
730 }
731 
732 static void
733 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
734 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
735 {
736 	u64 freq2k;
737 	unsigned p, n2, r2;
738 	struct hsw_wrpll_rnp best = { 0, 0, 0 };
739 	unsigned budget;
740 
741 	freq2k = clock / 100;
742 
743 	budget = hsw_wrpll_get_budget_for_freq(clock);
744 
745 	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
746 	 * entirely and pass the LC PLL output through directly. */
747 	if (freq2k == 5400000) {
748 		*n2_out = 2;
749 		*p_out = 1;
750 		*r2_out = 2;
751 		return;
752 	}
753 
754 	/*
755 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
756 	 * the WR PLL.
757 	 *
758 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
759 	 * Injecting R2 = 2 * R gives:
760 	 *   REF_MAX * r2 > LC_FREQ * 2 and
761 	 *   REF_MIN * r2 < LC_FREQ * 2
762 	 *
763 	 * Which means the desired boundaries for r2 are:
764 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
765 	 *
766 	 */
767 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
768 	     r2 <= LC_FREQ * 2 / REF_MIN;
769 	     r2++) {
770 
771 		/*
772 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
773 		 *
774 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
775 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
776 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
777 		 *   VCO_MIN * r2 < n2 * LC_FREQ
778 		 *
779 		 * Which means the desired boundaries for n2 are:
780 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
781 		 */
782 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
783 		     n2 <= VCO_MAX * r2 / LC_FREQ;
784 		     n2++) {
785 
786 			for (p = P_MIN; p <= P_MAX; p += P_INC)
787 				hsw_wrpll_update_rnp(freq2k, budget,
788 						     r2, n2, p, &best);
789 		}
790 	}
791 
792 	*n2_out = best.n2;
793 	*p_out = best.p;
794 	*r2_out = best.r2;
795 }
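
/*
 * Worked example (illustrative only; it does not assert which triple the
 * search above settles on): the synthesized clock is
 * LC_FREQ_2K * n2 / (p * r2), i.e. 540 MHz * n2 / (p * r2). For a 148.5 MHz
 * pixel clock, r2 = 20, n2 = 22, p = 4 is one exact solution:
 * Ref = 5400 / 20 = 270 MHz and VCO = 2700 * 22 / 20 = 2970 MHz both sit
 * inside the REF/VCO limits, and 540 * 22 / (4 * 20) = 148.5 MHz exactly.
 */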
796 
797 static struct intel_shared_dpll *
798 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
799 		      struct intel_crtc *crtc)
800 {
801 	struct intel_crtc_state *crtc_state =
802 		intel_atomic_get_new_crtc_state(state, crtc);
803 	struct intel_shared_dpll *pll;
804 	u32 val;
805 	unsigned int p, n2, r2;
806 
807 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
808 
809 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
810 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
811 	      WRPLL_DIVIDER_POST(p);
812 
813 	crtc_state->dpll_hw_state.wrpll = val;
814 
815 	pll = intel_find_shared_dpll(state, crtc,
816 				     &crtc_state->dpll_hw_state,
817 				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
818 
819 	if (!pll)
820 		return NULL;
821 
822 	return pll;
823 }
824 
825 static struct intel_shared_dpll *
826 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
827 {
828 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
829 	struct intel_shared_dpll *pll;
830 	enum intel_dpll_id pll_id;
831 	int clock = crtc_state->port_clock;
832 
833 	switch (clock / 2) {
834 	case 81000:
835 		pll_id = DPLL_ID_LCPLL_810;
836 		break;
837 	case 135000:
838 		pll_id = DPLL_ID_LCPLL_1350;
839 		break;
840 	case 270000:
841 		pll_id = DPLL_ID_LCPLL_2700;
842 		break;
843 	default:
844 		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
845 		return NULL;
846 	}
847 
848 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
849 
850 	if (!pll)
851 		return NULL;
852 
853 	return pll;
854 }
855 
856 static bool hsw_get_dpll(struct intel_atomic_state *state,
857 			 struct intel_crtc *crtc,
858 			 struct intel_encoder *encoder)
859 {
860 	struct intel_crtc_state *crtc_state =
861 		intel_atomic_get_new_crtc_state(state, crtc);
862 	struct intel_shared_dpll *pll;
863 
864 	memset(&crtc_state->dpll_hw_state, 0,
865 	       sizeof(crtc_state->dpll_hw_state));
866 
867 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
868 		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
869 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
870 		pll = hsw_ddi_dp_get_dpll(crtc_state);
871 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
872 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
873 			return false;
874 
875 		crtc_state->dpll_hw_state.spll =
876 			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
877 
878 		pll = intel_find_shared_dpll(state, crtc,
879 					     &crtc_state->dpll_hw_state,
880 					     DPLL_ID_SPLL, DPLL_ID_SPLL);
881 	} else {
882 		return false;
883 	}
884 
885 	if (!pll)
886 		return false;
887 
888 	intel_reference_shared_dpll(state, crtc,
889 				    pll, &crtc_state->dpll_hw_state);
890 
891 	crtc_state->shared_dpll = pll;
892 
893 	return true;
894 }
895 
896 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
897 			      const struct intel_dpll_hw_state *hw_state)
898 {
899 	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
900 		      hw_state->wrpll, hw_state->spll);
901 }
902 
903 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
904 	.enable = hsw_ddi_wrpll_enable,
905 	.disable = hsw_ddi_wrpll_disable,
906 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
907 };
908 
909 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
910 	.enable = hsw_ddi_spll_enable,
911 	.disable = hsw_ddi_spll_disable,
912 	.get_hw_state = hsw_ddi_spll_get_hw_state,
913 };
914 
915 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
916 				 struct intel_shared_dpll *pll)
917 {
918 }
919 
920 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
921 				  struct intel_shared_dpll *pll)
922 {
923 }
924 
925 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
926 				       struct intel_shared_dpll *pll,
927 				       struct intel_dpll_hw_state *hw_state)
928 {
929 	return true;
930 }
931 
932 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
933 	.enable = hsw_ddi_lcpll_enable,
934 	.disable = hsw_ddi_lcpll_disable,
935 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
936 };
937 
938 struct skl_dpll_regs {
939 	i915_reg_t ctl, cfgcr1, cfgcr2;
940 };
941 
942 /* this array is indexed by the *shared* pll id */
943 static const struct skl_dpll_regs skl_dpll_regs[4] = {
944 	{
945 		/* DPLL 0 */
946 		.ctl = LCPLL1_CTL,
947 		/* DPLL 0 doesn't support HDMI mode */
948 	},
949 	{
950 		/* DPLL 1 */
951 		.ctl = LCPLL2_CTL,
952 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
953 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
954 	},
955 	{
956 		/* DPLL 2 */
957 		.ctl = WRPLL_CTL(0),
958 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
959 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
960 	},
961 	{
962 		/* DPLL 3 */
963 		.ctl = WRPLL_CTL(1),
964 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
965 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
966 	},
967 };
968 
969 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
970 				    struct intel_shared_dpll *pll)
971 {
972 	const enum intel_dpll_id id = pll->info->id;
973 	u32 val;
974 
975 	val = I915_READ(DPLL_CTRL1);
976 
977 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
978 		 DPLL_CTRL1_SSC(id) |
979 		 DPLL_CTRL1_LINK_RATE_MASK(id));
980 	val |= pll->state.hw_state.ctrl1 << (id * 6);
981 
982 	I915_WRITE(DPLL_CTRL1, val);
983 	POSTING_READ(DPLL_CTRL1);
984 }
985 
986 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
987 			       struct intel_shared_dpll *pll)
988 {
989 	const struct skl_dpll_regs *regs = skl_dpll_regs;
990 	const enum intel_dpll_id id = pll->info->id;
991 
992 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
993 
994 	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
995 	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
996 	POSTING_READ(regs[id].cfgcr1);
997 	POSTING_READ(regs[id].cfgcr2);
998 
999 	/* the enable bit is always bit 31 */
1000 	I915_WRITE(regs[id].ctl,
1001 		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
1002 
1003 	if (intel_wait_for_register(&dev_priv->uncore,
1004 				    DPLL_STATUS,
1005 				    DPLL_LOCK(id),
1006 				    DPLL_LOCK(id),
1007 				    5))
1008 		DRM_ERROR("DPLL %d not locked\n", id);
1009 }
1010 
1011 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1012 				 struct intel_shared_dpll *pll)
1013 {
1014 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1015 }
1016 
1017 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1018 				struct intel_shared_dpll *pll)
1019 {
1020 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1021 	const enum intel_dpll_id id = pll->info->id;
1022 
1023 	/* the enable bit is always bit 31 */
1024 	I915_WRITE(regs[id].ctl,
1025 		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1026 	POSTING_READ(regs[id].ctl);
1027 }
1028 
1029 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1030 				  struct intel_shared_dpll *pll)
1031 {
1032 }
1033 
1034 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1035 				     struct intel_shared_dpll *pll,
1036 				     struct intel_dpll_hw_state *hw_state)
1037 {
1038 	u32 val;
1039 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1040 	const enum intel_dpll_id id = pll->info->id;
1041 	intel_wakeref_t wakeref;
1042 	bool ret;
1043 
1044 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1045 						     POWER_DOMAIN_DISPLAY_CORE);
1046 	if (!wakeref)
1047 		return false;
1048 
1049 	ret = false;
1050 
1051 	val = I915_READ(regs[id].ctl);
1052 	if (!(val & LCPLL_PLL_ENABLE))
1053 		goto out;
1054 
1055 	val = I915_READ(DPLL_CTRL1);
1056 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1057 
1058 	/* avoid reading back stale values if HDMI mode is not enabled */
1059 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1060 		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1061 		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1062 	}
1063 	ret = true;
1064 
1065 out:
1066 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1067 
1068 	return ret;
1069 }
1070 
1071 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1072 				       struct intel_shared_dpll *pll,
1073 				       struct intel_dpll_hw_state *hw_state)
1074 {
1075 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1076 	const enum intel_dpll_id id = pll->info->id;
1077 	intel_wakeref_t wakeref;
1078 	u32 val;
1079 	bool ret;
1080 
1081 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1082 						     POWER_DOMAIN_DISPLAY_CORE);
1083 	if (!wakeref)
1084 		return false;
1085 
1086 	ret = false;
1087 
1088 	/* DPLL0 is always enabled since it drives CDCLK */
1089 	val = I915_READ(regs[id].ctl);
1090 	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1091 		goto out;
1092 
1093 	val = I915_READ(DPLL_CTRL1);
1094 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1095 
1096 	ret = true;
1097 
1098 out:
1099 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1100 
1101 	return ret;
1102 }
1103 
1104 struct skl_wrpll_context {
1105 	u64 min_deviation;		/* current minimal deviation */
1106 	u64 central_freq;		/* chosen central freq */
1107 	u64 dco_freq;			/* chosen dco freq */
1108 	unsigned int p;			/* chosen divider */
1109 };
1110 
1111 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1112 {
1113 	memset(ctx, 0, sizeof(*ctx));
1114 
1115 	ctx->min_deviation = U64_MAX;
1116 }
1117 
1118 /* DCO freq must be within +1%/-6% of the DCO central freq */
1119 #define SKL_DCO_MAX_PDEVIATION	100
1120 #define SKL_DCO_MAX_NDEVIATION	600
1121 
1122 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1123 				  u64 central_freq,
1124 				  u64 dco_freq,
1125 				  unsigned int divider)
1126 {
1127 	u64 deviation;
1128 
1129 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1130 			      central_freq);
1131 
1132 	/* positive deviation */
1133 	if (dco_freq >= central_freq) {
1134 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1135 		    deviation < ctx->min_deviation) {
1136 			ctx->min_deviation = deviation;
1137 			ctx->central_freq = central_freq;
1138 			ctx->dco_freq = dco_freq;
1139 			ctx->p = divider;
1140 		}
1141 	/* negative deviation */
1142 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1143 		   deviation < ctx->min_deviation) {
1144 		ctx->min_deviation = deviation;
1145 		ctx->central_freq = central_freq;
1146 		ctx->dco_freq = dco_freq;
1147 		ctx->p = divider;
1148 	}
1149 }
1150 
1151 static void skl_wrpll_get_multipliers(unsigned int p,
1152 				      unsigned int *p0 /* out */,
1153 				      unsigned int *p1 /* out */,
1154 				      unsigned int *p2 /* out */)
1155 {
1156 	/* even dividers */
1157 	if (p % 2 == 0) {
1158 		unsigned int half = p / 2;
1159 
1160 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1161 			*p0 = 2;
1162 			*p1 = 1;
1163 			*p2 = half;
1164 		} else if (half % 2 == 0) {
1165 			*p0 = 2;
1166 			*p1 = half / 2;
1167 			*p2 = 2;
1168 		} else if (half % 3 == 0) {
1169 			*p0 = 3;
1170 			*p1 = half / 3;
1171 			*p2 = 2;
1172 		} else if (half % 7 == 0) {
1173 			*p0 = 7;
1174 			*p1 = half / 7;
1175 			*p2 = 2;
1176 		}
1177 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1178 		*p0 = 3;
1179 		*p1 = 1;
1180 		*p2 = p / 3;
1181 	} else if (p == 5 || p == 7) {
1182 		*p0 = p;
1183 		*p1 = 1;
1184 		*p2 = 1;
1185 	} else if (p == 15) {
1186 		*p0 = 3;
1187 		*p1 = 1;
1188 		*p2 = 5;
1189 	} else if (p == 21) {
1190 		*p0 = 7;
1191 		*p1 = 1;
1192 		*p2 = 3;
1193 	} else if (p == 35) {
1194 		*p0 = 7;
1195 		*p1 = 1;
1196 		*p2 = 5;
1197 	}
1198 }
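
/*
 * Examples of the factorization above (illustrative): p = 12 is even with
 * half = 6, giving p0 = 2, p1 = 3, p2 = 2 (2 * 3 * 2 = 12); the odd divider
 * p = 35 maps to p0 = 7, p1 = 1, p2 = 5.
 */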
1199 
1200 struct skl_wrpll_params {
1201 	u32 dco_fraction;
1202 	u32 dco_integer;
1203 	u32 qdiv_ratio;
1204 	u32 qdiv_mode;
1205 	u32 kdiv;
1206 	u32 pdiv;
1207 	u32 central_freq;
1208 };
1209 
1210 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1211 				      u64 afe_clock,
1212 				      u64 central_freq,
1213 				      u32 p0, u32 p1, u32 p2)
1214 {
1215 	u64 dco_freq;
1216 
1217 	switch (central_freq) {
1218 	case 9600000000ULL:
1219 		params->central_freq = 0;
1220 		break;
1221 	case 9000000000ULL:
1222 		params->central_freq = 1;
1223 		break;
1224 	case 8400000000ULL:
1225 		params->central_freq = 3;
1226 	}
1227 
1228 	switch (p0) {
1229 	case 1:
1230 		params->pdiv = 0;
1231 		break;
1232 	case 2:
1233 		params->pdiv = 1;
1234 		break;
1235 	case 3:
1236 		params->pdiv = 2;
1237 		break;
1238 	case 7:
1239 		params->pdiv = 4;
1240 		break;
1241 	default:
1242 		WARN(1, "Incorrect PDiv\n");
1243 	}
1244 
1245 	switch (p2) {
1246 	case 5:
1247 		params->kdiv = 0;
1248 		break;
1249 	case 2:
1250 		params->kdiv = 1;
1251 		break;
1252 	case 3:
1253 		params->kdiv = 2;
1254 		break;
1255 	case 1:
1256 		params->kdiv = 3;
1257 		break;
1258 	default:
1259 		WARN(1, "Incorrect KDiv\n");
1260 	}
1261 
1262 	params->qdiv_ratio = p1;
1263 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1264 
1265 	dco_freq = p0 * p1 * p2 * afe_clock;
1266 
1267 	/*
1268 	 * Intermediate values are in Hz.
1269 	 * Divide by MHz to match bspec
1270 	 */
1271 	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1272 	params->dco_fraction =
1273 		div_u64((div_u64(dco_freq, 24) -
1274 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1275 }
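
/*
 * Illustrative arithmetic for the DCO programming above (it does not assert
 * which divider the search picks): with a 148.5 MHz pixel clock the AFE
 * clock is 742.5 MHz, and for p0 * p1 * p2 = 12 the DCO runs at 8910 MHz,
 * so dco_integer = 8910000000 / 24000000 = 371 and
 * dco_fraction = (371250000 - 371000000) * 0x8000 / 1000000 = 8192 (0x2000),
 * i.e. a DCO of 371.25 * 24 MHz split into integer and fractional fields.
 */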
1276 
1277 static bool
1278 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1279 			struct skl_wrpll_params *wrpll_params)
1280 {
1281 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1282 	u64 dco_central_freq[3] = { 8400000000ULL,
1283 				    9000000000ULL,
1284 				    9600000000ULL };
1285 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1286 					     24, 28, 30, 32, 36, 40, 42, 44,
1287 					     48, 52, 54, 56, 60, 64, 66, 68,
1288 					     70, 72, 76, 78, 80, 84, 88, 90,
1289 					     92, 96, 98 };
1290 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1291 	static const struct {
1292 		const int *list;
1293 		int n_dividers;
1294 	} dividers[] = {
1295 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1296 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1297 	};
1298 	struct skl_wrpll_context ctx;
1299 	unsigned int dco, d, i;
1300 	unsigned int p0, p1, p2;
1301 
1302 	skl_wrpll_context_init(&ctx);
1303 
1304 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1305 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1306 			for (i = 0; i < dividers[d].n_dividers; i++) {
1307 				unsigned int p = dividers[d].list[i];
1308 				u64 dco_freq = p * afe_clock;
1309 
1310 				skl_wrpll_try_divider(&ctx,
1311 						      dco_central_freq[dco],
1312 						      dco_freq,
1313 						      p);
1314 				/*
1315 				 * Skip the remaining dividers if we're sure to
1316 				 * have found the definitive divider; a
1317 				 * deviation of 0 can't be improved upon.
1318 				 */
1319 				if (ctx.min_deviation == 0)
1320 					goto skip_remaining_dividers;
1321 			}
1322 		}
1323 
1324 skip_remaining_dividers:
1325 		/*
1326 		 * If a solution is found with an even divider, prefer
1327 		 * this one.
1328 		 */
1329 		if (d == 0 && ctx.p)
1330 			break;
1331 	}
1332 
1333 	if (!ctx.p) {
1334 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1335 		return false;
1336 	}
1337 
1338 	/*
1339 	 * gcc incorrectly analyses that these can be used without being
1340 	 * initialized. To be fair, it's hard to guess.
1341 	 */
1342 	p0 = p1 = p2 = 0;
1343 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1344 	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1345 				  p0, p1, p2);
1346 
1347 	return true;
1348 }
1349 
1350 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1351 {
1352 	u32 ctrl1, cfgcr1, cfgcr2;
1353 	struct skl_wrpll_params wrpll_params = { 0, };
1354 
1355 	/*
1356 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1357 	 * as the DPLL id in this function.
1358 	 */
1359 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1360 
1361 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1362 
1363 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1364 				     &wrpll_params))
1365 		return false;
1366 
1367 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1368 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1369 		wrpll_params.dco_integer;
1370 
1371 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1372 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1373 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1374 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1375 		wrpll_params.central_freq;
1376 
1377 	memset(&crtc_state->dpll_hw_state, 0,
1378 	       sizeof(crtc_state->dpll_hw_state));
1379 
1380 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1381 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1382 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1383 	return true;
1384 }
1385 
1386 static bool
1387 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1388 {
1389 	u32 ctrl1;
1390 
1391 	/*
1392 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1393 	 * as the DPLL id in this function.
1394 	 */
1395 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1396 	switch (crtc_state->port_clock / 2) {
1397 	case 81000:
1398 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1399 		break;
1400 	case 135000:
1401 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1402 		break;
1403 	case 270000:
1404 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1405 		break;
1406 		/* eDP 1.4 rates */
1407 	case 162000:
1408 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1409 		break;
1410 	case 108000:
1411 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1412 		break;
1413 	case 216000:
1414 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1415 		break;
1416 	}
1417 
1418 	memset(&crtc_state->dpll_hw_state, 0,
1419 	       sizeof(crtc_state->dpll_hw_state));
1420 
1421 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1422 
1423 	return true;
1424 }
1425 
1426 static bool skl_get_dpll(struct intel_atomic_state *state,
1427 			 struct intel_crtc *crtc,
1428 			 struct intel_encoder *encoder)
1429 {
1430 	struct intel_crtc_state *crtc_state =
1431 		intel_atomic_get_new_crtc_state(state, crtc);
1432 	struct intel_shared_dpll *pll;
1433 	bool bret;
1434 
1435 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1436 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1437 		if (!bret) {
1438 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1439 			return false;
1440 		}
1441 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1442 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1443 		if (!bret) {
1444 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1445 			return false;
1446 		}
1447 	} else {
1448 		return false;
1449 	}
1450 
1451 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1452 		pll = intel_find_shared_dpll(state, crtc,
1453 					     &crtc_state->dpll_hw_state,
1454 					     DPLL_ID_SKL_DPLL0,
1455 					     DPLL_ID_SKL_DPLL0);
1456 	else
1457 		pll = intel_find_shared_dpll(state, crtc,
1458 					     &crtc_state->dpll_hw_state,
1459 					     DPLL_ID_SKL_DPLL1,
1460 					     DPLL_ID_SKL_DPLL3);
1461 	if (!pll)
1462 		return false;
1463 
1464 	intel_reference_shared_dpll(state, crtc,
1465 				    pll, &crtc_state->dpll_hw_state);
1466 
1467 	crtc_state->shared_dpll = pll;
1468 
1469 	return true;
1470 }
1471 
1472 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1473 			      const struct intel_dpll_hw_state *hw_state)
1474 {
1475 	DRM_DEBUG_KMS("dpll_hw_state: "
1476 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1477 		      hw_state->ctrl1,
1478 		      hw_state->cfgcr1,
1479 		      hw_state->cfgcr2);
1480 }
1481 
1482 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1483 	.enable = skl_ddi_pll_enable,
1484 	.disable = skl_ddi_pll_disable,
1485 	.get_hw_state = skl_ddi_pll_get_hw_state,
1486 };
1487 
1488 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1489 	.enable = skl_ddi_dpll0_enable,
1490 	.disable = skl_ddi_dpll0_disable,
1491 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1492 };
1493 
1494 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1495 				struct intel_shared_dpll *pll)
1496 {
1497 	u32 temp;
1498 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1499 	enum dpio_phy phy;
1500 	enum dpio_channel ch;
1501 
1502 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1503 
1504 	/* Non-SSC reference */
1505 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1506 	temp |= PORT_PLL_REF_SEL;
1507 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1508 
1509 	if (IS_GEMINILAKE(dev_priv)) {
1510 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1511 		temp |= PORT_PLL_POWER_ENABLE;
1512 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1513 
1514 		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1515 				 PORT_PLL_POWER_STATE), 200))
1516 			DRM_ERROR("Power state not set for PLL:%d\n", port);
1517 	}
1518 
1519 	/* Disable 10 bit clock */
1520 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1521 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1522 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1523 
1524 	/* Write P1 & P2 */
1525 	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1526 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1527 	temp |= pll->state.hw_state.ebb0;
1528 	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1529 
1530 	/* Write M2 integer */
1531 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1532 	temp &= ~PORT_PLL_M2_MASK;
1533 	temp |= pll->state.hw_state.pll0;
1534 	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1535 
1536 	/* Write N */
1537 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1538 	temp &= ~PORT_PLL_N_MASK;
1539 	temp |= pll->state.hw_state.pll1;
1540 	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1541 
1542 	/* Write M2 fraction */
1543 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1544 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1545 	temp |= pll->state.hw_state.pll2;
1546 	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1547 
1548 	/* Write M2 fraction enable */
1549 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1550 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1551 	temp |= pll->state.hw_state.pll3;
1552 	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1553 
1554 	/* Write coeff */
1555 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1556 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1557 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1558 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1559 	temp |= pll->state.hw_state.pll6;
1560 	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1561 
1562 	/* Write calibration val */
1563 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1564 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1565 	temp |= pll->state.hw_state.pll8;
1566 	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1567 
1568 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1569 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1570 	temp |= pll->state.hw_state.pll9;
1571 	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1572 
1573 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1574 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1575 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1576 	temp |= pll->state.hw_state.pll10;
1577 	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1578 
1579 	/* Recalibrate with new settings */
1580 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1581 	temp |= PORT_PLL_RECALIBRATE;
1582 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1583 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1584 	temp |= pll->state.hw_state.ebb4;
1585 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1586 
1587 	/* Enable PLL */
1588 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1589 	temp |= PORT_PLL_ENABLE;
1590 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1591 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1592 
1593 	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1594 			200))
1595 		DRM_ERROR("PLL %d not locked\n", port);
1596 
1597 	if (IS_GEMINILAKE(dev_priv)) {
1598 		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1599 		temp |= DCC_DELAY_RANGE_2;
1600 		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1601 	}
1602 
1603 	/*
1604 	 * While we write to the group register to program all lanes at once we
1605 	 * can read only lane registers and we pick lanes 0/1 for that.
1606 	 */
1607 	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1608 	temp &= ~LANE_STAGGER_MASK;
1609 	temp &= ~LANESTAGGER_STRAP_OVRD;
1610 	temp |= pll->state.hw_state.pcsdw12;
1611 	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1612 }
1613 
1614 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1615 					struct intel_shared_dpll *pll)
1616 {
1617 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1618 	u32 temp;
1619 
1620 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1621 	temp &= ~PORT_PLL_ENABLE;
1622 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1623 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1624 
1625 	if (IS_GEMINILAKE(dev_priv)) {
1626 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1627 		temp &= ~PORT_PLL_POWER_ENABLE;
1628 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1629 
1630 		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1631 				PORT_PLL_POWER_STATE), 200))
1632 			DRM_ERROR("Power state not reset for PLL:%d\n", port);
1633 	}
1634 }
1635 
1636 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1637 					struct intel_shared_dpll *pll,
1638 					struct intel_dpll_hw_state *hw_state)
1639 {
1640 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1641 	intel_wakeref_t wakeref;
1642 	enum dpio_phy phy;
1643 	enum dpio_channel ch;
1644 	u32 val;
1645 	bool ret;
1646 
1647 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1648 
1649 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1650 						     POWER_DOMAIN_DISPLAY_CORE);
1651 	if (!wakeref)
1652 		return false;
1653 
1654 	ret = false;
1655 
1656 	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1657 	if (!(val & PORT_PLL_ENABLE))
1658 		goto out;
1659 
1660 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1661 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1662 
1663 	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1664 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1665 
1666 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1667 	hw_state->pll0 &= PORT_PLL_M2_MASK;
1668 
1669 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1670 	hw_state->pll1 &= PORT_PLL_N_MASK;
1671 
1672 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1673 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1674 
1675 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1676 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1677 
1678 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1679 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1680 			  PORT_PLL_INT_COEFF_MASK |
1681 			  PORT_PLL_GAIN_CTL_MASK;
1682 
1683 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1684 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1685 
1686 	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1687 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1688 
1689 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1690 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1691 			   PORT_PLL_DCO_AMP_MASK;
1692 
1693 	/*
1694 	 * While we write to the group register to program all lanes at once we
1695 	 * can read only lane registers. We configure all lanes the same way, so
1696 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1697 	 */
1698 	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1699 	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1700 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1701 				 hw_state->pcsdw12,
1702 				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1703 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1704 
1705 	ret = true;
1706 
1707 out:
1708 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1709 
1710 	return ret;
1711 }
1712 
1713 /* bxt clock parameters */
1714 struct bxt_clk_div {
1715 	int clock;
1716 	u32 p1;
1717 	u32 p2;
1718 	u32 m2_int;
1719 	u32 m2_frac;
1720 	bool m2_frac_en;
1721 	u32 n;
1722 
1723 	int vco;
1724 };
1725 
1726 /* pre-calculated values for DP link rates */
1727 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1728 	{162000, 4, 2, 32, 1677722, 1, 1},
1729 	{270000, 4, 1, 27,       0, 0, 1},
1730 	{540000, 2, 1, 27,       0, 0, 1},
1731 	{216000, 3, 2, 32, 1677722, 1, 1},
1732 	{243000, 4, 1, 24, 1258291, 1, 1},
1733 	{324000, 4, 1, 32, 1677722, 1, 1},
1734 	{432000, 3, 1, 32, 1677722, 1, 1}
1735 };
1736 
1737 static bool
1738 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1739 			  struct bxt_clk_div *clk_div)
1740 {
1741 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1742 	struct dpll best_clock;
1743 
1744 	/* Calculate HDMI div */
1745 	/*
1746 	 * FIXME: tie the following calculation into
1747 	 * i9xx_crtc_compute_clock
1748 	 */
1749 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1750 		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1751 				 crtc_state->port_clock,
1752 				 pipe_name(crtc->pipe));
1753 		return false;
1754 	}
1755 
1756 	clk_div->p1 = best_clock.p1;
1757 	clk_div->p2 = best_clock.p2;
1758 	WARN_ON(best_clock.m1 != 2);
1759 	clk_div->n = best_clock.n;
1760 	clk_div->m2_int = best_clock.m2 >> 22;
1761 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1762 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1763 
1764 	clk_div->vco = best_clock.vco;
1765 
1766 	return true;
1767 }
1768 
1769 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1770 				    struct bxt_clk_div *clk_div)
1771 {
1772 	int clock = crtc_state->port_clock;
1773 	int i;
1774 
1775 	*clk_div = bxt_dp_clk_val[0];
1776 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1777 		if (bxt_dp_clk_val[i].clock == clock) {
1778 			*clk_div = bxt_dp_clk_val[i];
1779 			break;
1780 		}
1781 	}
1782 
1783 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1784 }
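
/*
 * Example (illustrative): for a 270000 kHz (HBR) link the table above gives
 * p1 = 4 and p2 = 1, so vco = 270000 * 10 / 2 * 4 * 1 = 5400000 kHz, which
 * lands on the dedicated vco == 5400000 tuning case in
 * bxt_ddi_set_dpll_hw_state().
 */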
1785 
1786 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1787 				      const struct bxt_clk_div *clk_div)
1788 {
1789 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1790 	int clock = crtc_state->port_clock;
1791 	int vco = clk_div->vco;
1792 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1793 	u32 lanestagger;
1794 
1795 	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
1796 
1797 	if (vco >= 6200000 && vco <= 6700000) {
1798 		prop_coef = 4;
1799 		int_coef = 9;
1800 		gain_ctl = 3;
1801 		targ_cnt = 8;
1802 	} else if ((vco > 5400000 && vco < 6200000) ||
1803 			(vco >= 4800000 && vco < 5400000)) {
1804 		prop_coef = 5;
1805 		int_coef = 11;
1806 		gain_ctl = 3;
1807 		targ_cnt = 9;
1808 	} else if (vco == 5400000) {
1809 		prop_coef = 3;
1810 		int_coef = 8;
1811 		gain_ctl = 1;
1812 		targ_cnt = 9;
1813 	} else {
1814 		DRM_ERROR("Invalid VCO\n");
1815 		return false;
1816 	}
1817 
1818 	if (clock > 270000)
1819 		lanestagger = 0x18;
1820 	else if (clock > 135000)
1821 		lanestagger = 0x0d;
1822 	else if (clock > 67000)
1823 		lanestagger = 0x07;
1824 	else if (clock > 33000)
1825 		lanestagger = 0x04;
1826 	else
1827 		lanestagger = 0x02;
1828 
1829 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1830 	dpll_hw_state->pll0 = clk_div->m2_int;
1831 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1832 	dpll_hw_state->pll2 = clk_div->m2_frac;
1833 
1834 	if (clk_div->m2_frac_en)
1835 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1836 
1837 	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1838 	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1839 
1840 	dpll_hw_state->pll8 = targ_cnt;
1841 
1842 	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1843 
1844 	dpll_hw_state->pll10 =
1845 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1846 		| PORT_PLL_DCO_AMP_OVR_EN_H;
1847 
1848 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1849 
1850 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1851 
1852 	return true;
1853 }
1854 
1855 static bool
1856 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1857 {
1858 	struct bxt_clk_div clk_div = {};
1859 
1860 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1861 
1862 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1863 }
1864 
1865 static bool
1866 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1867 {
1868 	struct bxt_clk_div clk_div = {};
1869 
1870 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1871 
1872 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1873 }
1874 
1875 static bool bxt_get_dpll(struct intel_atomic_state *state,
1876 			 struct intel_crtc *crtc,
1877 			 struct intel_encoder *encoder)
1878 {
1879 	struct intel_crtc_state *crtc_state =
1880 		intel_atomic_get_new_crtc_state(state, crtc);
1881 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1882 	struct intel_shared_dpll *pll;
1883 	enum intel_dpll_id id;
1884 
1885 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1886 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1887 		return false;
1888 
1889 	if (intel_crtc_has_dp_encoder(crtc_state) &&
1890 	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1891 		return false;
1892 
1893 	/* 1:1 mapping between ports and PLLs */
1894 	id = (enum intel_dpll_id) encoder->port;
1895 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
1896 
1897 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1898 		      crtc->base.base.id, crtc->base.name, pll->info->name);
1899 
1900 	intel_reference_shared_dpll(state, crtc,
1901 				    pll, &crtc_state->dpll_hw_state);
1902 
1903 	crtc_state->shared_dpll = pll;
1904 
1905 	return true;
1906 }
1907 
1908 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1909 			      const struct intel_dpll_hw_state *hw_state)
1910 {
1911 	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1912 		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1913 		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1914 		      hw_state->ebb0,
1915 		      hw_state->ebb4,
1916 		      hw_state->pll0,
1917 		      hw_state->pll1,
1918 		      hw_state->pll2,
1919 		      hw_state->pll3,
1920 		      hw_state->pll6,
1921 		      hw_state->pll8,
1922 		      hw_state->pll9,
1923 		      hw_state->pll10,
1924 		      hw_state->pcsdw12);
1925 }
1926 
1927 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1928 	.enable = bxt_ddi_pll_enable,
1929 	.disable = bxt_ddi_pll_disable,
1930 	.get_hw_state = bxt_ddi_pll_get_hw_state,
1931 };
1932 
1933 struct intel_dpll_mgr {
1934 	const struct dpll_info *dpll_info;
1935 
1936 	bool (*get_dplls)(struct intel_atomic_state *state,
1937 			  struct intel_crtc *crtc,
1938 			  struct intel_encoder *encoder);
1939 	void (*put_dplls)(struct intel_atomic_state *state,
1940 			  struct intel_crtc *crtc);
1941 	void (*update_active_dpll)(struct intel_atomic_state *state,
1942 				   struct intel_crtc *crtc,
1943 				   struct intel_encoder *encoder);
1944 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1945 			      const struct intel_dpll_hw_state *hw_state);
1946 };
1947 
1948 static const struct dpll_info pch_plls[] = {
1949 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1950 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1951 	{ },
1952 };
1953 
1954 static const struct intel_dpll_mgr pch_pll_mgr = {
1955 	.dpll_info = pch_plls,
1956 	.get_dplls = ibx_get_dpll,
1957 	.put_dplls = intel_put_dpll,
1958 	.dump_hw_state = ibx_dump_hw_state,
1959 };
1960 
1961 static const struct dpll_info hsw_plls[] = {
1962 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1963 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1964 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1965 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1966 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1967 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1968 	{ },
1969 };
1970 
1971 static const struct intel_dpll_mgr hsw_pll_mgr = {
1972 	.dpll_info = hsw_plls,
1973 	.get_dplls = hsw_get_dpll,
1974 	.put_dplls = intel_put_dpll,
1975 	.dump_hw_state = hsw_dump_hw_state,
1976 };
1977 
1978 static const struct dpll_info skl_plls[] = {
1979 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1980 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1981 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1982 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1983 	{ },
1984 };
1985 
1986 static const struct intel_dpll_mgr skl_pll_mgr = {
1987 	.dpll_info = skl_plls,
1988 	.get_dplls = skl_get_dpll,
1989 	.put_dplls = intel_put_dpll,
1990 	.dump_hw_state = skl_dump_hw_state,
1991 };
1992 
1993 static const struct dpll_info bxt_plls[] = {
1994 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
1995 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1996 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1997 	{ },
1998 };
1999 
2000 static const struct intel_dpll_mgr bxt_pll_mgr = {
2001 	.dpll_info = bxt_plls,
2002 	.get_dplls = bxt_get_dpll,
2003 	.put_dplls = intel_put_dpll,
2004 	.dump_hw_state = bxt_dump_hw_state,
2005 };
2006 
2007 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2008 			       struct intel_shared_dpll *pll)
2009 {
2010 	const enum intel_dpll_id id = pll->info->id;
2011 	u32 val;
2012 
2013 	/* 1. Enable DPLL power in DPLL_ENABLE. */
2014 	val = I915_READ(CNL_DPLL_ENABLE(id));
2015 	val |= PLL_POWER_ENABLE;
2016 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2017 
2018 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2019 	if (intel_wait_for_register(&dev_priv->uncore,
2020 				    CNL_DPLL_ENABLE(id),
2021 				    PLL_POWER_STATE,
2022 				    PLL_POWER_STATE,
2023 				    5))
2024 		DRM_ERROR("PLL %d Power not enabled\n", id);
2025 
2026 	/*
2027 	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2028 	 * select DP mode, and set DP link rate.
2029 	 */
2030 	val = pll->state.hw_state.cfgcr0;
2031 	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
2032 
	/* 4. Read back to ensure writes completed */
2034 	POSTING_READ(CNL_DPLL_CFGCR0(id));
2035 
	/* 3. Configure DPLL_CFGCR1 for HDMI mode. */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
2038 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2039 		val = pll->state.hw_state.cfgcr1;
2040 		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
2042 		POSTING_READ(CNL_DPLL_CFGCR1(id));
2043 	}
2044 
2045 	/*
2046 	 * 5. If the frequency will result in a change to the voltage
2047 	 * requirement, follow the Display Voltage Frequency Switching
2048 	 * Sequence Before Frequency Change
2049 	 *
2050 	 * Note: DVFS is actually handled via the cdclk code paths,
2051 	 * hence we do nothing here.
2052 	 */
2053 
2054 	/* 6. Enable DPLL in DPLL_ENABLE. */
2055 	val = I915_READ(CNL_DPLL_ENABLE(id));
2056 	val |= PLL_ENABLE;
2057 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2058 
2059 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2060 	if (intel_wait_for_register(&dev_priv->uncore,
2061 				    CNL_DPLL_ENABLE(id),
2062 				    PLL_LOCK,
2063 				    PLL_LOCK,
2064 				    5))
2065 		DRM_ERROR("PLL %d not locked\n", id);
2066 
2067 	/*
2068 	 * 8. If the frequency will result in a change to the voltage
2069 	 * requirement, follow the Display Voltage Frequency Switching
2070 	 * Sequence After Frequency Change
2071 	 *
2072 	 * Note: DVFS is actually handled via the cdclk code paths,
2073 	 * hence we do nothing here.
2074 	 */
2075 
2076 	/*
2077 	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2078 	 * Done at intel_ddi_clk_select
2079 	 */
2080 }
2081 
2082 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2083 				struct intel_shared_dpll *pll)
2084 {
2085 	const enum intel_dpll_id id = pll->info->id;
2086 	u32 val;
2087 
2088 	/*
2089 	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2090 	 * Done at intel_ddi_post_disable
2091 	 */
2092 
2093 	/*
2094 	 * 2. If the frequency will result in a change to the voltage
2095 	 * requirement, follow the Display Voltage Frequency Switching
2096 	 * Sequence Before Frequency Change
2097 	 *
2098 	 * Note: DVFS is actually handled via the cdclk code paths,
2099 	 * hence we do nothing here.
2100 	 */
2101 
2102 	/* 3. Disable DPLL through DPLL_ENABLE. */
2103 	val = I915_READ(CNL_DPLL_ENABLE(id));
2104 	val &= ~PLL_ENABLE;
2105 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2106 
2107 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2108 	if (intel_wait_for_register(&dev_priv->uncore,
2109 				    CNL_DPLL_ENABLE(id),
2110 				    PLL_LOCK,
2111 				    0,
2112 				    5))
2113 		DRM_ERROR("PLL %d locked\n", id);
2114 
2115 	/*
2116 	 * 5. If the frequency will result in a change to the voltage
2117 	 * requirement, follow the Display Voltage Frequency Switching
2118 	 * Sequence After Frequency Change
2119 	 *
2120 	 * Note: DVFS is actually handled via the cdclk code paths,
2121 	 * hence we do nothing here.
2122 	 */
2123 
2124 	/* 6. Disable DPLL power in DPLL_ENABLE. */
2125 	val = I915_READ(CNL_DPLL_ENABLE(id));
2126 	val &= ~PLL_POWER_ENABLE;
2127 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2128 
2129 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2130 	if (intel_wait_for_register(&dev_priv->uncore,
2131 				    CNL_DPLL_ENABLE(id),
2132 				    PLL_POWER_STATE,
2133 				    0,
2134 				    5))
2135 		DRM_ERROR("PLL %d Power not disabled\n", id);
2136 }
2137 
2138 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2139 				     struct intel_shared_dpll *pll,
2140 				     struct intel_dpll_hw_state *hw_state)
2141 {
2142 	const enum intel_dpll_id id = pll->info->id;
2143 	intel_wakeref_t wakeref;
2144 	u32 val;
2145 	bool ret;
2146 
2147 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2148 						     POWER_DOMAIN_DISPLAY_CORE);
2149 	if (!wakeref)
2150 		return false;
2151 
2152 	ret = false;
2153 
2154 	val = I915_READ(CNL_DPLL_ENABLE(id));
2155 	if (!(val & PLL_ENABLE))
2156 		goto out;
2157 
2158 	val = I915_READ(CNL_DPLL_CFGCR0(id));
2159 	hw_state->cfgcr0 = val;
2160 
	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE)
		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));

2165 	ret = true;
2166 
2167 out:
2168 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2169 
2170 	return ret;
2171 }
2172 
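/*
 * Decompose the overall divider into the P, Q and K dividers used by the PLL,
 * such that pdiv * qdiv * kdiv == bestdiv.  For example, bestdiv == 20 yields
 * pdiv == 2, qdiv == 5, kdiv == 2.
 */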
2173 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2174 				      int *qdiv, int *kdiv)
2175 {
2176 	/* even dividers */
2177 	if (bestdiv % 2 == 0) {
2178 		if (bestdiv == 2) {
2179 			*pdiv = 2;
2180 			*qdiv = 1;
2181 			*kdiv = 1;
2182 		} else if (bestdiv % 4 == 0) {
2183 			*pdiv = 2;
2184 			*qdiv = bestdiv / 4;
2185 			*kdiv = 2;
2186 		} else if (bestdiv % 6 == 0) {
2187 			*pdiv = 3;
2188 			*qdiv = bestdiv / 6;
2189 			*kdiv = 2;
2190 		} else if (bestdiv % 5 == 0) {
2191 			*pdiv = 5;
2192 			*qdiv = bestdiv / 10;
2193 			*kdiv = 2;
2194 		} else if (bestdiv % 14 == 0) {
2195 			*pdiv = 7;
2196 			*qdiv = bestdiv / 14;
2197 			*kdiv = 2;
2198 		}
2199 	} else {
2200 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2201 			*pdiv = bestdiv;
2202 			*qdiv = 1;
2203 			*kdiv = 1;
2204 		} else { /* 9, 15, 21 */
2205 			*pdiv = bestdiv / 3;
2206 			*qdiv = 1;
2207 			*kdiv = 3;
2208 		}
2209 	}
2210 }
2211 
2212 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2213 				      u32 dco_freq, u32 ref_freq,
2214 				      int pdiv, int qdiv, int kdiv)
2215 {
2216 	u32 dco;
2217 
2218 	switch (kdiv) {
2219 	case 1:
2220 		params->kdiv = 1;
2221 		break;
2222 	case 2:
2223 		params->kdiv = 2;
2224 		break;
2225 	case 3:
2226 		params->kdiv = 4;
2227 		break;
2228 	default:
2229 		WARN(1, "Incorrect KDiv\n");
2230 	}
2231 
2232 	switch (pdiv) {
2233 	case 2:
2234 		params->pdiv = 1;
2235 		break;
2236 	case 3:
2237 		params->pdiv = 2;
2238 		break;
2239 	case 5:
2240 		params->pdiv = 4;
2241 		break;
2242 	case 7:
2243 		params->pdiv = 8;
2244 		break;
2245 	default:
2246 		WARN(1, "Incorrect PDiv\n");
2247 	}
2248 
2249 	WARN_ON(kdiv != 2 && qdiv != 1);
2250 
2251 	params->qdiv_ratio = qdiv;
2252 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2253 
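	/* DCO frequency / reference frequency as a .15 fixed point value. */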
2254 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2255 
2256 	params->dco_integer = dco >> 15;
2257 	params->dco_fraction = dco & 0x7fff;
2258 }
2259 
2260 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2261 {
2262 	int ref_clock = dev_priv->cdclk.hw.ref;
2263 
2264 	/*
2265 	 * For ICL+, the spec states: if reference frequency is 38.4,
2266 	 * use 19.2 because the DPLL automatically divides that by 2.
2267 	 */
2268 	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2269 		ref_clock = 19200;
2270 
2271 	return ref_clock;
2272 }
2273 
2274 static bool
2275 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2276 			struct skl_wrpll_params *wrpll_params)
2277 {
2278 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2279 	u32 afe_clock = crtc_state->port_clock * 5;
2280 	u32 ref_clock;
2281 	u32 dco_min = 7998000;
2282 	u32 dco_max = 10000000;
2283 	u32 dco_mid = (dco_min + dco_max) / 2;
2284 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2285 					 18, 20, 24, 28, 30, 32,  36,  40,
2286 					 42, 44, 48, 50, 52, 54,  56,  60,
2287 					 64, 66, 68, 70, 72, 76,  78,  80,
2288 					 84, 88, 90, 92, 96, 98, 100, 102,
2289 					  3,  5,  7,  9, 15, 21 };
2290 	u32 dco, best_dco = 0, dco_centrality = 0;
2291 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2292 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2293 
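	/* Find the divider that puts the DCO closest to the middle of its range. */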
2294 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2295 		dco = afe_clock * dividers[d];
2296 
2297 		if ((dco <= dco_max) && (dco >= dco_min)) {
2298 			dco_centrality = abs(dco - dco_mid);
2299 
2300 			if (dco_centrality < best_dco_centrality) {
2301 				best_dco_centrality = dco_centrality;
2302 				best_div = dividers[d];
2303 				best_dco = dco;
2304 			}
2305 		}
2306 	}
2307 
2308 	if (best_div == 0)
2309 		return false;
2310 
2311 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2312 
2313 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2314 
2315 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2316 				  pdiv, qdiv, kdiv);
2317 
2318 	return true;
2319 }
2320 
2321 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2322 {
2323 	u32 cfgcr0, cfgcr1;
2324 	struct skl_wrpll_params wrpll_params = { 0, };
2325 
2326 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2327 
2328 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2329 		return false;
2330 
2331 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2332 		wrpll_params.dco_integer;
2333 
2334 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2335 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2336 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2337 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2338 		DPLL_CFGCR1_CENTRAL_FREQ;
2339 
2340 	memset(&crtc_state->dpll_hw_state, 0,
2341 	       sizeof(crtc_state->dpll_hw_state));
2342 
2343 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2344 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2345 	return true;
2346 }
2347 
2348 static bool
2349 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2350 {
2351 	u32 cfgcr0;
2352 
2353 	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2354 
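	/*
	 * The link rate is selected based on half the port clock, e.g. a
	 * 540000 kHz port clock selects the 2.7 GHz link rate.
	 */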
2355 	switch (crtc_state->port_clock / 2) {
2356 	case 81000:
2357 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2358 		break;
2359 	case 135000:
2360 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2361 		break;
2362 	case 270000:
2363 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2364 		break;
2365 		/* eDP 1.4 rates */
2366 	case 162000:
2367 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2368 		break;
2369 	case 108000:
2370 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2371 		break;
2372 	case 216000:
2373 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2374 		break;
2375 	case 324000:
2376 		/* Some SKUs may require elevated I/O voltage to support this */
2377 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2378 		break;
2379 	case 405000:
2380 		/* Some SKUs may require elevated I/O voltage to support this */
2381 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2382 		break;
2383 	}
2384 
2385 	memset(&crtc_state->dpll_hw_state, 0,
2386 	       sizeof(crtc_state->dpll_hw_state));
2387 
2388 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2389 
2390 	return true;
2391 }
2392 
2393 static bool cnl_get_dpll(struct intel_atomic_state *state,
2394 			 struct intel_crtc *crtc,
2395 			 struct intel_encoder *encoder)
2396 {
2397 	struct intel_crtc_state *crtc_state =
2398 		intel_atomic_get_new_crtc_state(state, crtc);
2399 	struct intel_shared_dpll *pll;
2400 	bool bret;
2401 
2402 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2403 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2404 		if (!bret) {
2405 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2406 			return false;
2407 		}
2408 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2409 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2410 		if (!bret) {
2411 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2412 			return false;
2413 		}
2414 	} else {
2415 		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2416 			      crtc_state->output_types);
2417 		return false;
2418 	}
2419 
2420 	pll = intel_find_shared_dpll(state, crtc,
2421 				     &crtc_state->dpll_hw_state,
2422 				     DPLL_ID_SKL_DPLL0,
2423 				     DPLL_ID_SKL_DPLL2);
2424 	if (!pll) {
2425 		DRM_DEBUG_KMS("No PLL selected\n");
2426 		return false;
2427 	}
2428 
2429 	intel_reference_shared_dpll(state, crtc,
2430 				    pll, &crtc_state->dpll_hw_state);
2431 
2432 	crtc_state->shared_dpll = pll;
2433 
2434 	return true;
2435 }
2436 
2437 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2438 			      const struct intel_dpll_hw_state *hw_state)
2439 {
2440 	DRM_DEBUG_KMS("dpll_hw_state: "
2441 		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2442 		      hw_state->cfgcr0,
2443 		      hw_state->cfgcr1);
2444 }
2445 
2446 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2447 	.enable = cnl_ddi_pll_enable,
2448 	.disable = cnl_ddi_pll_disable,
2449 	.get_hw_state = cnl_ddi_pll_get_hw_state,
2450 };
2451 
2452 static const struct dpll_info cnl_plls[] = {
2453 	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2454 	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2455 	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2456 	{ },
2457 };
2458 
2459 static const struct intel_dpll_mgr cnl_pll_mgr = {
2460 	.dpll_info = cnl_plls,
2461 	.get_dplls = cnl_get_dpll,
2462 	.put_dplls = intel_put_dpll,
2463 	.dump_hw_state = cnl_dump_hw_state,
2464 };
2465 
2466 struct icl_combo_pll_params {
2467 	int clock;
2468 	struct skl_wrpll_params wrpll;
2469 };
2470 
2471 /*
 * These values are already adjusted: they're the bits we write to the
2473  * registers, not the logical values.
2474  */
2475 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2476 	{ 540000,
2477 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2478 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2479 	{ 270000,
2480 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2481 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2482 	{ 162000,
2483 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2484 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 	{ 324000,
2486 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2487 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2488 	{ 216000,
2489 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2490 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2491 	{ 432000,
2492 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2493 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2494 	{ 648000,
2495 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2496 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2497 	{ 810000,
2498 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2499 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2500 };
2503 /* Also used for 38.4 MHz values. */
2504 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2505 	{ 540000,
2506 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2507 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2508 	{ 270000,
2509 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2510 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2511 	{ 162000,
2512 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2513 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2514 	{ 324000,
2515 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2516 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2517 	{ 216000,
2518 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2519 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2520 	{ 432000,
2521 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2522 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2523 	{ 648000,
2524 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2525 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2526 	{ 810000,
2527 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2528 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2529 };
2530 
2531 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2532 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2533 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2534 };
2535 
2536 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2537 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2538 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2539 };
2540 
2541 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2542 				  struct skl_wrpll_params *pll_params)
2543 {
2544 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2545 	const struct icl_combo_pll_params *params =
2546 		dev_priv->cdclk.hw.ref == 24000 ?
2547 		icl_dp_combo_pll_24MHz_values :
2548 		icl_dp_combo_pll_19_2MHz_values;
2549 	int clock = crtc_state->port_clock;
2550 	int i;
2551 
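	/*
	 * Both frequency tables have the same number of entries, hence
	 * bounding the loop by the 24 MHz table works for either.
	 */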
2552 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2553 		if (clock == params[i].clock) {
2554 			*pll_params = params[i].wrpll;
2555 			return true;
2556 		}
2557 	}
2558 
2559 	MISSING_CASE(clock);
2560 	return false;
2561 }
2562 
2563 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2564 			     struct skl_wrpll_params *pll_params)
2565 {
2566 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2567 
2568 	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2569 			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
2570 	return true;
2571 }
2572 
2573 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2574 				struct intel_encoder *encoder,
2575 				struct intel_dpll_hw_state *pll_state)
2576 {
2577 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2578 	u32 cfgcr0, cfgcr1;
2579 	struct skl_wrpll_params pll_params = { 0 };
2580 	bool ret;
2581 
2582 	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2583 							encoder->port)))
2584 		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2585 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2586 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2587 		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2588 	else
2589 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2590 
2591 	if (!ret)
2592 		return false;
2593 
2594 	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2595 		 pll_params.dco_integer;
2596 
2597 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2598 		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2599 		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2600 		 DPLL_CFGCR1_PDIV(pll_params.pdiv);
2601 
2602 	if (INTEL_GEN(dev_priv) >= 12)
2603 		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2604 	else
2605 		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2606 
2607 	memset(pll_state, 0, sizeof(*pll_state));
2608 
2609 	pll_state->cfgcr0 = cfgcr0;
2610 	pll_state->cfgcr1 = cfgcr1;
2611 
2612 	return true;
2613 }
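/*
 * The MG PHY PLL ids map 1:1 onto the TypeC ports, starting at
 * DPLL_ID_ICL_MGPLL1.
 */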
2616 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2617 {
2618 	return id - DPLL_ID_ICL_MGPLL1;
2619 }
2620 
2621 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2622 {
2623 	return tc_port + DPLL_ID_ICL_MGPLL1;
2624 }
2625 
2626 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2627 				     u32 *target_dco_khz,
2628 				     struct intel_dpll_hw_state *state)
2629 {
2630 	u32 dco_min_freq, dco_max_freq;
2631 	int div1_vals[] = {7, 5, 3, 2};
2632 	unsigned int i;
2633 	int div2;
2634 
2635 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2636 	dco_max_freq = is_dp ? 8100000 : 10000000;
2637 
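	/*
	 * DCO frequency = 5 * port clock * div1 * div2; use the first divider
	 * pair that lands within the allowed DCO range.
	 */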
2638 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2639 		int div1 = div1_vals[i];
2640 
2641 		for (div2 = 10; div2 > 0; div2--) {
2642 			int dco = div1 * div2 * clock_khz * 5;
2643 			int a_divratio, tlinedrv, inputsel;
2644 			u32 hsdiv;
2645 
2646 			if (dco < dco_min_freq || dco > dco_max_freq)
2647 				continue;
2648 
2649 			if (div2 >= 2) {
2650 				a_divratio = is_dp ? 10 : 5;
2651 				tlinedrv = 2;
2652 			} else {
2653 				a_divratio = 5;
2654 				tlinedrv = 0;
2655 			}
2656 			inputsel = is_dp ? 0 : 1;
2657 
2658 			switch (div1) {
2659 			default:
2660 				MISSING_CASE(div1);
2661 				/* fall through */
2662 			case 2:
2663 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2664 				break;
2665 			case 3:
2666 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2667 				break;
2668 			case 5:
2669 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2670 				break;
2671 			case 7:
2672 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2673 				break;
2674 			}
2675 
2676 			*target_dco_khz = dco;
2677 
2678 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2679 
2680 			state->mg_clktop2_coreclkctl1 =
2681 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2682 
2683 			state->mg_clktop2_hsclkctl =
2684 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2685 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2686 				hsdiv |
2687 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2688 
2689 			return true;
2690 		}
2691 	}
2692 
2693 	return false;
2694 }
2695 
2696 /*
2697  * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation; that's why it looks so different.
2699  */
2700 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2701 				  struct intel_dpll_hw_state *pll_state)
2702 {
2703 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2704 	int refclk_khz = dev_priv->cdclk.hw.ref;
2705 	int clock = crtc_state->port_clock;
2706 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2707 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2708 	u32 prop_coeff, int_coeff;
2709 	u32 tdc_targetcnt, feedfwgain;
2710 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2711 	u64 tmp;
2712 	bool use_ssc = false;
2713 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2714 
2715 	memset(pll_state, 0, sizeof(*pll_state));
2716 
2717 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2718 				      pll_state)) {
2719 		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
2720 		return false;
2721 	}
2722 
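	/*
	 * Feedback dividers: dco_khz ~= refclk_khz * m1div *
	 * (m2div_int + m2div_frac / 2^22), bumping m1div from 2 to 4 if the
	 * integer part would not fit in 8 bits.
	 */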
2723 	m1div = 2;
2724 	m2div_int = dco_khz / (refclk_khz * m1div);
2725 	if (m2div_int > 255) {
2726 		m1div = 4;
2727 		m2div_int = dco_khz / (refclk_khz * m1div);
2728 		if (m2div_int > 255) {
2729 			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2730 				      clock);
2731 			return false;
2732 		}
2733 	}
2734 	m2div_rem = dco_khz % (refclk_khz * m1div);
2735 
2736 	tmp = (u64)m2div_rem * (1 << 22);
2737 	do_div(tmp, refclk_khz * m1div);
2738 	m2div_frac = tmp;
2739 
2740 	switch (refclk_khz) {
2741 	case 19200:
2742 		iref_ndiv = 1;
2743 		iref_trim = 28;
2744 		iref_pulse_w = 1;
2745 		break;
2746 	case 24000:
2747 		iref_ndiv = 1;
2748 		iref_trim = 25;
2749 		iref_pulse_w = 2;
2750 		break;
2751 	case 38400:
2752 		iref_ndiv = 2;
2753 		iref_trim = 28;
2754 		iref_pulse_w = 1;
2755 		break;
2756 	default:
2757 		MISSING_CASE(refclk_khz);
2758 		return false;
2759 	}
2760 
2761 	/*
2762 	 * tdc_res = 0.000003
2763 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2764 	 *
2765 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2766 	 * was supposed to be a division, but we rearranged the operations of
2767 	 * the formula to avoid early divisions so we don't multiply the
2768 	 * rounding errors.
2769 	 *
2770 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2771 	 * we also rearrange to work with integers.
2772 	 *
2773 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2774 	 * last division by 10.
2775 	 */
2776 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2777 
2778 	/*
2779 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2780 	 * 32 bits. That's not a problem since we round the division down
2781 	 * anyway.
2782 	 */
2783 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2784 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2785 
2786 	if (dco_khz >= 9000000) {
2787 		prop_coeff = 5;
2788 		int_coeff = 10;
2789 	} else {
2790 		prop_coeff = 4;
2791 		int_coeff = 8;
2792 	}
2793 
2794 	if (use_ssc) {
2795 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2796 		do_div(tmp, refclk_khz * m1div * 10000);
2797 		ssc_stepsize = tmp;
2798 
2799 		tmp = mul_u32_u32(dco_khz, 1000);
2800 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2801 	} else {
2802 		ssc_stepsize = 0;
2803 		ssc_steplen = 0;
2804 	}
2805 	ssc_steplog = 4;
2806 
2807 	pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2808 				  MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2809 				  MG_PLL_DIV0_FBDIV_INT(m2div_int);
2810 
2811 	pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2812 				 MG_PLL_DIV1_DITHER_DIV_2 |
2813 				 MG_PLL_DIV1_NDIVRATIO(1) |
2814 				 MG_PLL_DIV1_FBPREDIV(m1div);
2815 
2816 	pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2817 			       MG_PLL_LF_AFCCNTSEL_512 |
2818 			       MG_PLL_LF_GAINCTRL(1) |
2819 			       MG_PLL_LF_INT_COEFF(int_coeff) |
2820 			       MG_PLL_LF_PROP_COEFF(prop_coeff);
2821 
2822 	pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2823 				      MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2824 				      MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2825 				      MG_PLL_FRAC_LOCK_DCODITHEREN |
2826 				      MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2827 	if (use_ssc || m2div_rem > 0)
2828 		pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2829 
2830 	pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2831 				MG_PLL_SSC_TYPE(2) |
2832 				MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2833 				MG_PLL_SSC_STEPNUM(ssc_steplog) |
2834 				MG_PLL_SSC_FLLEN |
2835 				MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2836 
2837 	pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2838 					    MG_PLL_TDC_COLDST_IREFINT_EN |
2839 					    MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2840 					    MG_PLL_TDC_TDCOVCCORR_EN |
2841 					    MG_PLL_TDC_TDCSEL(3);
2842 
2843 	pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2844 				 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2845 				 MG_PLL_BIAS_BIAS_BONUS(10) |
2846 				 MG_PLL_BIAS_BIASCAL_EN |
2847 				 MG_PLL_BIAS_CTRIM(12) |
2848 				 MG_PLL_BIAS_VREF_RDAC(4) |
2849 				 MG_PLL_BIAS_IREFTRIM(iref_trim);
2850 
2851 	if (refclk_khz == 38400) {
2852 		pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2853 		pll_state->mg_pll_bias_mask = 0;
2854 	} else {
2855 		pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2856 		pll_state->mg_pll_bias_mask = -1U;
2857 	}
2858 
2859 	pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2860 	pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2861 
2862 	return true;
2863 }
2864 
2865 /**
2866  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2867  * @crtc_state: state for the CRTC to select the DPLL for
 * @port_dpll_id: the port DPLL to select as active
2869  *
2870  * Select the given @port_dpll_id instance from the DPLLs reserved for the
2871  * CRTC.
2872  */
2873 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2874 			      enum icl_port_dpll_id port_dpll_id)
2875 {
2876 	struct icl_port_dpll *port_dpll =
2877 		&crtc_state->icl_port_dplls[port_dpll_id];
2878 
2879 	crtc_state->shared_dpll = port_dpll->pll;
2880 	crtc_state->dpll_hw_state = port_dpll->hw_state;
2881 }
2882 
2883 static void icl_update_active_dpll(struct intel_atomic_state *state,
2884 				   struct intel_crtc *crtc,
2885 				   struct intel_encoder *encoder)
2886 {
2887 	struct intel_crtc_state *crtc_state =
2888 		intel_atomic_get_new_crtc_state(state, crtc);
2889 	struct intel_digital_port *primary_port;
2890 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2891 
2892 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
2893 		enc_to_mst(&encoder->base)->primary :
2894 		enc_to_dig_port(&encoder->base);
2895 
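	/*
	 * Ports in DP-alt or legacy TypeC mode need the MG PHY PLL; otherwise
	 * keep the default (TBT or combo PHY) PLL reserved for the CRTC.
	 */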
2896 	if (primary_port &&
2897 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
2898 	     primary_port->tc_mode == TC_PORT_LEGACY))
2899 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
2900 
2901 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
2902 }
2903 
2904 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
2905 				   struct intel_crtc *crtc,
2906 				   struct intel_encoder *encoder)
2907 {
2908 	struct intel_crtc_state *crtc_state =
2909 		intel_atomic_get_new_crtc_state(state, crtc);
2910 	struct icl_port_dpll *port_dpll =
2911 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2912 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2913 	enum port port = encoder->port;
2914 	bool has_dpll4 = false;
2915 
2916 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2917 		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
2918 
2919 		return false;
2920 	}
2921 
2922 	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
2923 		has_dpll4 = true;
2924 
2925 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2926 						&port_dpll->hw_state,
2927 						DPLL_ID_ICL_DPLL0,
2928 						has_dpll4 ? DPLL_ID_EHL_DPLL4
2929 							  : DPLL_ID_ICL_DPLL1);
2930 	if (!port_dpll->pll) {
2931 		DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
2932 			      port_name(encoder->port));
2933 		return false;
2934 	}
2935 
2936 	intel_reference_shared_dpll(state, crtc,
2937 				    port_dpll->pll, &port_dpll->hw_state);
2938 
2939 	icl_update_active_dpll(state, crtc, encoder);
2940 
2941 	return true;
2942 }
2943 
2944 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
2945 				 struct intel_crtc *crtc,
2946 				 struct intel_encoder *encoder)
2947 {
2948 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2949 	struct intel_crtc_state *crtc_state =
2950 		intel_atomic_get_new_crtc_state(state, crtc);
2951 	struct icl_port_dpll *port_dpll;
2952 	enum intel_dpll_id dpll_id;
2953 
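	/*
	 * Reserve both the TBT PLL and the port specific MG PHY PLL; which one
	 * is actually used is selected later based on the TypeC port mode, see
	 * icl_update_active_dpll().
	 */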
2954 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2955 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2956 		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
2957 		return false;
2958 	}
2959 
2960 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2961 						&port_dpll->hw_state,
2962 						DPLL_ID_ICL_TBTPLL,
2963 						DPLL_ID_ICL_TBTPLL);
2964 	if (!port_dpll->pll) {
2965 		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
2966 		return false;
2967 	}
2968 	intel_reference_shared_dpll(state, crtc,
2969 				    port_dpll->pll, &port_dpll->hw_state);
2972 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
2973 	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
2974 		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
2975 		goto err_unreference_tbt_pll;
2976 	}
2977 
2978 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
2979 							 encoder->port));
2980 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2981 						&port_dpll->hw_state,
2982 						dpll_id,
2983 						dpll_id);
2984 	if (!port_dpll->pll) {
2985 		DRM_DEBUG_KMS("No MG PHY PLL found\n");
2986 		goto err_unreference_tbt_pll;
2987 	}
2988 	intel_reference_shared_dpll(state, crtc,
2989 				    port_dpll->pll, &port_dpll->hw_state);
2990 
2991 	icl_update_active_dpll(state, crtc, encoder);
2992 
2993 	return true;
2994 
2995 err_unreference_tbt_pll:
2996 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2997 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
2998 
2999 	return false;
3000 }
3001 
3002 static bool icl_get_dplls(struct intel_atomic_state *state,
3003 			  struct intel_crtc *crtc,
3004 			  struct intel_encoder *encoder)
3005 {
3006 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3007 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3008 
3009 	if (intel_phy_is_combo(dev_priv, phy))
3010 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3011 	else if (intel_phy_is_tc(dev_priv, phy))
3012 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3013 
3014 	MISSING_CASE(phy);
3015 
3016 	return false;
3017 }
3018 
3019 static void icl_put_dplls(struct intel_atomic_state *state,
3020 			  struct intel_crtc *crtc)
3021 {
3022 	const struct intel_crtc_state *old_crtc_state =
3023 		intel_atomic_get_old_crtc_state(state, crtc);
3024 	struct intel_crtc_state *new_crtc_state =
3025 		intel_atomic_get_new_crtc_state(state, crtc);
3026 	enum icl_port_dpll_id id;
3027 
3028 	new_crtc_state->shared_dpll = NULL;
3029 
3030 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3031 		const struct icl_port_dpll *old_port_dpll =
3032 			&old_crtc_state->icl_port_dplls[id];
3033 		struct icl_port_dpll *new_port_dpll =
3034 			&new_crtc_state->icl_port_dplls[id];
3035 
3036 		new_port_dpll->pll = NULL;
3037 
3038 		if (!old_port_dpll->pll)
3039 			continue;
3040 
3041 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3042 	}
3043 }
3044 
3045 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3046 				struct intel_shared_dpll *pll,
3047 				struct intel_dpll_hw_state *hw_state)
3048 {
3049 	const enum intel_dpll_id id = pll->info->id;
3050 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3051 	intel_wakeref_t wakeref;
3052 	bool ret = false;
3053 	u32 val;
3054 
3055 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3056 						     POWER_DOMAIN_DISPLAY_CORE);
3057 	if (!wakeref)
3058 		return false;
3059 
3060 	val = I915_READ(MG_PLL_ENABLE(tc_port));
3061 	if (!(val & PLL_ENABLE))
3062 		goto out;
3063 
3064 	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
3065 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3066 
3067 	hw_state->mg_clktop2_coreclkctl1 =
3068 		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3069 	hw_state->mg_clktop2_coreclkctl1 &=
3070 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3071 
3072 	hw_state->mg_clktop2_hsclkctl =
3073 		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3074 	hw_state->mg_clktop2_hsclkctl &=
3075 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3076 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3077 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3078 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3079 
3080 	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
3081 	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
3082 	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
3083 	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
3084 	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
3085 
3086 	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
3087 	hw_state->mg_pll_tdc_coldst_bias =
3088 		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3089 
3090 	if (dev_priv->cdclk.hw.ref == 38400) {
3091 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3092 		hw_state->mg_pll_bias_mask = 0;
3093 	} else {
3094 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3095 		hw_state->mg_pll_bias_mask = -1U;
3096 	}
3097 
3098 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3099 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3100 
3101 	ret = true;
3102 out:
3103 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3104 	return ret;
3105 }
3106 
3107 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3108 				 struct intel_shared_dpll *pll,
3109 				 struct intel_dpll_hw_state *hw_state,
3110 				 i915_reg_t enable_reg)
3111 {
3112 	const enum intel_dpll_id id = pll->info->id;
3113 	intel_wakeref_t wakeref;
3114 	bool ret = false;
3115 	u32 val;
3116 
3117 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3118 						     POWER_DOMAIN_DISPLAY_CORE);
3119 	if (!wakeref)
3120 		return false;
3121 
3122 	val = I915_READ(enable_reg);
3123 	if (!(val & PLL_ENABLE))
3124 		goto out;
3125 
3126 	if (INTEL_GEN(dev_priv) >= 12) {
3127 		hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
3128 		hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
3129 	} else {
3130 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3131 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
3132 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
3133 		} else {
3134 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3135 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3136 		}
3137 	}
3138 
3139 	ret = true;
3140 out:
3141 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3142 	return ret;
3143 }
3144 
3145 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3146 				   struct intel_shared_dpll *pll,
3147 				   struct intel_dpll_hw_state *hw_state)
3148 {
3149 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3150 
3151 	if (IS_ELKHARTLAKE(dev_priv) &&
3152 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3153 		enable_reg = MG_PLL_ENABLE(0);
3154 	}
3155 
3156 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3157 }
3158 
3159 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3160 				 struct intel_shared_dpll *pll,
3161 				 struct intel_dpll_hw_state *hw_state)
3162 {
3163 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3164 }
3165 
3166 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3167 			   struct intel_shared_dpll *pll)
3168 {
3169 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3170 	const enum intel_dpll_id id = pll->info->id;
3171 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3172 
3173 	if (INTEL_GEN(dev_priv) >= 12) {
3174 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3175 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3176 	} else {
3177 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3178 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3179 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3180 		} else {
3181 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3182 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3183 		}
3184 	}
3185 
3186 	I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
3187 	I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
3188 	POSTING_READ(cfgcr1_reg);
3189 }
3190 
3191 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3192 			     struct intel_shared_dpll *pll)
3193 {
3194 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3195 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3196 	u32 val;
3197 
3198 	/*
3199 	 * Some of the following registers have reserved fields, so program
3200 	 * these with RMW based on a mask. The mask can be fixed or generated
3201 	 * during the calc/readout phase if the mask depends on some other HW
3202 	 * state like refclk, see icl_calc_mg_pll_state().
3203 	 */
3204 	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3205 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3206 	val |= hw_state->mg_refclkin_ctl;
3207 	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3208 
3209 	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3210 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3211 	val |= hw_state->mg_clktop2_coreclkctl1;
3212 	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3213 
3214 	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3215 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3216 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3217 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3218 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3219 	val |= hw_state->mg_clktop2_hsclkctl;
3220 	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
3221 
3222 	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3223 	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3224 	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3225 	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3226 	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3227 
3228 	val = I915_READ(MG_PLL_BIAS(tc_port));
3229 	val &= ~hw_state->mg_pll_bias_mask;
3230 	val |= hw_state->mg_pll_bias;
3231 	I915_WRITE(MG_PLL_BIAS(tc_port), val);
3232 
3233 	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3234 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3235 	val |= hw_state->mg_pll_tdc_coldst_bias;
3236 	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3237 
3238 	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3239 }
3240 
3241 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3242 				 struct intel_shared_dpll *pll,
3243 				 i915_reg_t enable_reg)
3244 {
3245 	u32 val;
3246 
3247 	val = I915_READ(enable_reg);
3248 	val |= PLL_POWER_ENABLE;
3249 	I915_WRITE(enable_reg, val);
3250 
3251 	/*
3252 	 * The spec says we need to "wait" but it also says it should be
3253 	 * immediate.
3254 	 */
3255 	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
3256 				    PLL_POWER_STATE, PLL_POWER_STATE, 1))
3257 		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3258 }
3259 
3260 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3261 			   struct intel_shared_dpll *pll,
3262 			   i915_reg_t enable_reg)
3263 {
3264 	u32 val;
3265 
3266 	val = I915_READ(enable_reg);
3267 	val |= PLL_ENABLE;
3268 	I915_WRITE(enable_reg, val);
3269 
3270 	/* Timeout is actually 600us. */
3271 	if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
3272 				    PLL_LOCK, PLL_LOCK, 1))
3273 		DRM_ERROR("PLL %d not locked\n", pll->info->id);
3274 }
3275 
3276 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3277 			     struct intel_shared_dpll *pll)
3278 {
3279 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3280 
3281 	if (IS_ELKHARTLAKE(dev_priv) &&
3282 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3283 		enable_reg = MG_PLL_ENABLE(0);
3284 
3285 		/*
3286 		 * We need to disable DC states when this DPLL is enabled.
3287 		 * This can be done by taking a reference on DPLL4 power
3288 		 * domain.
3289 		 */
3290 		pll->wakeref = intel_display_power_get(dev_priv,
3291 						       POWER_DOMAIN_DPLL_DC_OFF);
3292 	}
3293 
3294 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3295 
3296 	icl_dpll_write(dev_priv, pll);
3297 
3298 	/*
3299 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3300 	 * paths should already be setting the appropriate voltage, hence we do
3301 	 * nothing here.
3302 	 */
3303 
3304 	icl_pll_enable(dev_priv, pll, enable_reg);
3305 
3306 	/* DVFS post sequence would be here. See the comment above. */
3307 }
3308 
3309 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3310 			   struct intel_shared_dpll *pll)
3311 {
3312 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3313 
3314 	icl_dpll_write(dev_priv, pll);
3315 
3316 	/*
3317 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3318 	 * paths should already be setting the appropriate voltage, hence we do
3319 	 * nothing here.
3320 	 */
3321 
3322 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3323 
3324 	/* DVFS post sequence would be here. See the comment above. */
3325 }
3326 
3327 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3328 			  struct intel_shared_dpll *pll)
3329 {
3330 	i915_reg_t enable_reg =
3331 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3332 
3333 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3334 
3335 	icl_mg_pll_write(dev_priv, pll);
3336 
3337 	/*
3338 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3339 	 * paths should already be setting the appropriate voltage, hence we do
3340 	 * nothing here.
3341 	 */
3342 
3343 	icl_pll_enable(dev_priv, pll, enable_reg);
3344 
3345 	/* DVFS post sequence would be here. See the comment above. */
3346 }
3347 
3348 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3349 			    struct intel_shared_dpll *pll,
3350 			    i915_reg_t enable_reg)
3351 {
3352 	u32 val;
3353 
3354 	/* The first steps are done by intel_ddi_post_disable(). */
3355 
3356 	/*
3357 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3358 	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
3360 	 */
3361 
3362 	val = I915_READ(enable_reg);
3363 	val &= ~PLL_ENABLE;
3364 	I915_WRITE(enable_reg, val);
3365 
3366 	/* Timeout is actually 1us. */
3367 	if (intel_wait_for_register(&dev_priv->uncore,
3368 				    enable_reg, PLL_LOCK, 0, 1))
3369 		DRM_ERROR("PLL %d locked\n", pll->info->id);
3370 
3371 	/* DVFS post sequence would be here. See the comment above. */
3372 
3373 	val = I915_READ(enable_reg);
3374 	val &= ~PLL_POWER_ENABLE;
3375 	I915_WRITE(enable_reg, val);
3376 
3377 	/*
3378 	 * The spec says we need to "wait" but it also says it should be
3379 	 * immediate.
3380 	 */
3381 	if (intel_wait_for_register(&dev_priv->uncore,
3382 				    enable_reg, PLL_POWER_STATE, 0, 1))
3383 		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3384 }
3385 
3386 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3387 			      struct intel_shared_dpll *pll)
3388 {
3389 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3390 
3391 	if (IS_ELKHARTLAKE(dev_priv) &&
3392 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3393 		enable_reg = MG_PLL_ENABLE(0);
3394 		icl_pll_disable(dev_priv, pll, enable_reg);
3395 
3396 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3397 					pll->wakeref);
3398 		return;
3399 	}
3400 
3401 	icl_pll_disable(dev_priv, pll, enable_reg);
3402 }
3403 
3404 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3405 			    struct intel_shared_dpll *pll)
3406 {
3407 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3408 }
3409 
3410 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3411 			   struct intel_shared_dpll *pll)
3412 {
3413 	i915_reg_t enable_reg =
3414 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3415 
3416 	icl_pll_disable(dev_priv, pll, enable_reg);
3417 }
3418 
3419 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3420 			      const struct intel_dpll_hw_state *hw_state)
3421 {
3422 	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3423 		      "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3424 		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3425 		      "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3426 		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3427 		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3428 		      hw_state->cfgcr0, hw_state->cfgcr1,
3429 		      hw_state->mg_refclkin_ctl,
3430 		      hw_state->mg_clktop2_coreclkctl1,
3431 		      hw_state->mg_clktop2_hsclkctl,
3432 		      hw_state->mg_pll_div0,
3433 		      hw_state->mg_pll_div1,
3434 		      hw_state->mg_pll_lf,
3435 		      hw_state->mg_pll_frac_lock,
3436 		      hw_state->mg_pll_ssc,
3437 		      hw_state->mg_pll_bias,
3438 		      hw_state->mg_pll_tdc_coldst_bias);
3439 }
3440 
3441 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3442 	.enable = combo_pll_enable,
3443 	.disable = combo_pll_disable,
3444 	.get_hw_state = combo_pll_get_hw_state,
3445 };
3446 
3447 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3448 	.enable = tbt_pll_enable,
3449 	.disable = tbt_pll_disable,
3450 	.get_hw_state = tbt_pll_get_hw_state,
3451 };
3452 
3453 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3454 	.enable = mg_pll_enable,
3455 	.disable = mg_pll_disable,
3456 	.get_hw_state = mg_pll_get_hw_state,
3457 };
3458 
3459 static const struct dpll_info icl_plls[] = {
3460 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3461 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3462 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3463 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3464 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3465 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3466 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3467 	{ },
3468 };
3469 
3470 static const struct intel_dpll_mgr icl_pll_mgr = {
3471 	.dpll_info = icl_plls,
3472 	.get_dplls = icl_get_dplls,
3473 	.put_dplls = icl_put_dplls,
3474 	.update_active_dpll = icl_update_active_dpll,
3475 	.dump_hw_state = icl_dump_hw_state,
3476 };
3477 
3478 static const struct dpll_info ehl_plls[] = {
3479 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3480 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3481 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
3482 	{ },
3483 };
3484 
3485 static const struct intel_dpll_mgr ehl_pll_mgr = {
3486 	.dpll_info = ehl_plls,
3487 	.get_dplls = icl_get_dplls,
3488 	.put_dplls = icl_put_dplls,
3489 	.dump_hw_state = icl_dump_hw_state,
3490 };
3491 
3492 static const struct dpll_info tgl_plls[] = {
3493 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3494 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3495 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3496 	/* TODO: Add typeC plls */
3497 	{ },
3498 };
3499 
3500 static const struct intel_dpll_mgr tgl_pll_mgr = {
3501 	.dpll_info = tgl_plls,
3502 	.get_dplls = icl_get_dplls,
3503 	.put_dplls = icl_put_dplls,
3504 	.dump_hw_state = icl_dump_hw_state,
3505 };
3506 
3507 /**
3508  * intel_shared_dpll_init - Initialize shared DPLLs
3509  * @dev: drm device
3510  *
3511  * Initialize shared DPLLs for @dev.
3512  */
3513 void intel_shared_dpll_init(struct drm_device *dev)
3514 {
3515 	struct drm_i915_private *dev_priv = to_i915(dev);
3516 	const struct intel_dpll_mgr *dpll_mgr = NULL;
3517 	const struct dpll_info *dpll_info;
3518 	int i;
3519 
3520 	if (INTEL_GEN(dev_priv) >= 12)
3521 		dpll_mgr = &tgl_pll_mgr;
3522 	else if (IS_ELKHARTLAKE(dev_priv))
3523 		dpll_mgr = &ehl_pll_mgr;
3524 	else if (INTEL_GEN(dev_priv) >= 11)
3525 		dpll_mgr = &icl_pll_mgr;
3526 	else if (IS_CANNONLAKE(dev_priv))
3527 		dpll_mgr = &cnl_pll_mgr;
3528 	else if (IS_GEN9_BC(dev_priv))
3529 		dpll_mgr = &skl_pll_mgr;
3530 	else if (IS_GEN9_LP(dev_priv))
3531 		dpll_mgr = &bxt_pll_mgr;
3532 	else if (HAS_DDI(dev_priv))
3533 		dpll_mgr = &hsw_pll_mgr;
3534 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3535 		dpll_mgr = &pch_pll_mgr;
3536 
3537 	if (!dpll_mgr) {
3538 		dev_priv->num_shared_dpll = 0;
3539 		return;
3540 	}
3541 
3542 	dpll_info = dpll_mgr->dpll_info;
3543 
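	/* The table entries are expected to be indexed by their DPLL id. */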
3544 	for (i = 0; dpll_info[i].name; i++) {
3545 		WARN_ON(i != dpll_info[i].id);
3546 		dev_priv->shared_dplls[i].info = &dpll_info[i];
3547 	}
3548 
3549 	dev_priv->dpll_mgr = dpll_mgr;
3550 	dev_priv->num_shared_dpll = i;
3551 	mutex_init(&dev_priv->dpll_lock);
3552 
3553 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3554 }
3555 
3556 /**
3557  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3558  * @state: atomic state
3559  * @crtc: CRTC to reserve DPLLs for
3560  * @encoder: encoder
3561  *
3562  * This function reserves all required DPLLs for the given CRTC and encoder
3563  * combination in the current atomic commit @state and the new @crtc atomic
3564  * state.
3565  *
3566  * The new configuration in the atomic commit @state is made effective by
3567  * calling intel_shared_dpll_swap_state().
3568  *
3569  * The reserved DPLLs should be released by calling
3570  * intel_release_shared_dplls().
3571  *
3572  * Returns:
3573  * True if all required DPLLs were successfully reserved.
3574  */
3575 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3576 				struct intel_crtc *crtc,
3577 				struct intel_encoder *encoder)
3578 {
3579 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3580 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3581 
3582 	if (WARN_ON(!dpll_mgr))
3583 		return false;
3584 
3585 	return dpll_mgr->get_dplls(state, crtc, encoder);
3586 }
3587 
3588 /**
3589  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3590  * @state: atomic state
3591  * @crtc: crtc from which the DPLLs are to be released
3592  *
3593  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3594  * from the current atomic commit @state and the old @crtc atomic state.
3595  *
3596  * The new configuration in the atomic commit @state is made effective by
3597  * calling intel_shared_dpll_swap_state().
3598  */
3599 void intel_release_shared_dplls(struct intel_atomic_state *state,
3600 				struct intel_crtc *crtc)
3601 {
3602 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3603 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3604 
3605 	/*
3606 	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even on platforms that don't yet support the
	 * shared DPLL framework and on which intel_reserve_shared_dplls() is
	 * never called.
3610 	 */
3611 	if (!dpll_mgr)
3612 		return;
3613 
3614 	dpll_mgr->put_dplls(state, crtc);
3615 }
3616 
3617 /**
3618  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3619  * @state: atomic state
3620  * @crtc: the CRTC for which to update the active DPLL
3621  * @encoder: encoder determining the type of port DPLL
3622  *
3623  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3624  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3625  * DPLL selected will be based on the current mode of the encoder's port.
3626  */
3627 void intel_update_active_dpll(struct intel_atomic_state *state,
3628 			      struct intel_crtc *crtc,
3629 			      struct intel_encoder *encoder)
3630 {
3631 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3632 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3633 
3634 	if (WARN_ON(!dpll_mgr))
3635 		return;
3636 
3637 	dpll_mgr->update_active_dpll(state, crtc, encoder);
3638 }
3639 
3640 /**
 * intel_dpll_dump_hw_state - write hw_state to dmesg
3642  * @dev_priv: i915 drm device
3643  * @hw_state: hw state to be written to the log
3644  *
3645  * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3646  */
3647 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3648 			      const struct intel_dpll_hw_state *hw_state)
3649 {
3650 	if (dev_priv->dpll_mgr) {
3651 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3652 	} else {
3653 		/* fallback for platforms that don't use the shared dpll
3654 		 * infrastructure
3655 		 */
3656 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3657 			      "fp0: 0x%x, fp1: 0x%x\n",
3658 			      hw_state->dpll,
3659 			      hw_state->dpll_md,
3660 			      hw_state->fp0,
3661 			      hw_state->fp1);
3662 	}
3663 }
3664