1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
39  * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
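/*
 * A rough sketch of how these pieces fit together (the actual call sites
 * live in the atomic modeset code, not in this file):
 *
 *   compute/check phase:  intel_reserve_shared_dplls()
 *   commit phase:         intel_shared_dpll_swap_state()
 *                         intel_enable_shared_dpll()
 *   disable/teardown:     intel_disable_shared_dpll()
 *                         intel_release_shared_dplls()
 */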
47 
48 static void
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 				  struct intel_shared_dpll_state *shared_dpll)
51 {
52 	enum intel_dpll_id i;
53 
54 	/* Copy shared dpll state */
55 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
57 
58 		shared_dpll[i] = pll->state;
59 	}
60 }
61 
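/*
 * Return the staged DPLL state array for this atomic state, copying the
 * current per-PLL state from the device the first time it is requested
 * within the transaction.
 */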
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
64 {
65 	struct intel_atomic_state *state = to_intel_atomic_state(s);
66 
67 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
68 
69 	if (!state->dpll_set) {
70 		state->dpll_set = true;
71 
72 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
73 						  state->shared_dpll);
74 	}
75 
76 	return state->shared_dpll;
77 }
78 
79 /**
80  * intel_get_shared_dpll_by_id - get a DPLL given its id
81  * @dev_priv: i915 device instance
82  * @id: pll id
83  *
84  * Returns:
85  * A pointer to the DPLL with @id
86  */
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 			    enum intel_dpll_id id)
90 {
91 	return &dev_priv->shared_dplls[id];
92 }
93 
94 /**
95  * intel_get_shared_dpll_id - get the id of a DPLL
96  * @dev_priv: i915 device instance
97  * @pll: the DPLL
98  *
99  * Returns:
100  * The id of @pll
101  */
102 enum intel_dpll_id
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 			 struct intel_shared_dpll *pll)
105 {
106 	if (WARN_ON(pll < dev_priv->shared_dplls ||
107 		    pll >= &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
108 		return -1;
109 
110 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
111 }
112 
113 /* For ILK+ */
114 void assert_shared_dpll(struct drm_i915_private *dev_priv,
115 			struct intel_shared_dpll *pll,
116 			bool state)
117 {
118 	bool cur_state;
119 	struct intel_dpll_hw_state hw_state;
120 
121 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
122 		return;
123 
124 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
125 	I915_STATE_WARN(cur_state != state,
126 			"%s assertion failure (expected %s, current %s)\n",
127 			pll->info->name, onoff(state), onoff(cur_state));
128 }
129 
130 /**
131  * intel_prepare_shared_dpll - call a DPLL's prepare hook
132  * @crtc_state: CRTC, and its state, which has a shared DPLL
133  *
134  * This calls the PLL's prepare hook if it has one and if the PLL is not
135  * already enabled. The prepare hook is platform specific.
136  */
137 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
138 {
139 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
140 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
141 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
142 
143 	if (WARN_ON(pll == NULL))
144 		return;
145 
146 	mutex_lock(&dev_priv->dpll_lock);
147 	WARN_ON(!pll->state.crtc_mask);
148 	if (!pll->active_mask) {
149 		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
150 		WARN_ON(pll->on);
151 		assert_shared_dpll_disabled(dev_priv, pll);
152 
153 		pll->info->funcs->prepare(dev_priv, pll);
154 	}
155 	mutex_unlock(&dev_priv->dpll_lock);
156 }
157 
158 /**
159  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
160  * @crtc_state: CRTC, and its state, which has a shared DPLL
161  *
162  * Enable the shared DPLL used by the CRTC in @crtc_state.
163  */
164 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
165 {
166 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
167 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
168 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
169 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
170 	unsigned int old_mask;
171 
172 	if (WARN_ON(pll == NULL))
173 		return;
174 
175 	mutex_lock(&dev_priv->dpll_lock);
176 	old_mask = pll->active_mask;
177 
178 	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
179 	    WARN_ON(pll->active_mask & crtc_mask))
180 		goto out;
181 
182 	pll->active_mask |= crtc_mask;
183 
184 	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
185 		      pll->info->name, pll->active_mask, pll->on,
186 		      crtc->base.base.id);
187 
188 	if (old_mask) {
189 		WARN_ON(!pll->on);
190 		assert_shared_dpll_enabled(dev_priv, pll);
191 		goto out;
192 	}
193 	WARN_ON(pll->on);
194 
195 	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
196 	pll->info->funcs->enable(dev_priv, pll);
197 	pll->on = true;
198 
199 out:
200 	mutex_unlock(&dev_priv->dpll_lock);
201 }
202 
203 /**
204  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
205  * @crtc_state: CRTC, and its state, which has a shared DPLL
206  *
207  * Disable the shared DPLL used by the CRTC in @crtc_state.
208  */
209 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
210 {
211 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
212 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
213 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
214 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
215 
216 	/* PCH only available on ILK+ */
217 	if (INTEL_GEN(dev_priv) < 5)
218 		return;
219 
220 	if (pll == NULL)
221 		return;
222 
223 	mutex_lock(&dev_priv->dpll_lock);
224 	if (WARN_ON(!(pll->active_mask & crtc_mask)))
225 		goto out;
226 
227 	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
228 		      pll->info->name, pll->active_mask, pll->on,
229 		      crtc->base.base.id);
230 
231 	assert_shared_dpll_enabled(dev_priv, pll);
232 	WARN_ON(!pll->on);
233 
234 	pll->active_mask &= ~crtc_mask;
235 	if (pll->active_mask)
236 		goto out;
237 
238 	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
239 	pll->info->funcs->disable(dev_priv, pll);
240 	pll->on = false;
241 
242 out:
243 	mutex_unlock(&dev_priv->dpll_lock);
244 }
245 
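/*
 * Look for a PLL in [range_min, range_max] whose staged state matches
 * @pll_state so that it can be shared, otherwise fall back to the first
 * currently unused PLL in the range. Returns NULL if neither exists.
 */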
246 static struct intel_shared_dpll *
247 intel_find_shared_dpll(struct intel_atomic_state *state,
248 		       const struct intel_crtc *crtc,
249 		       const struct intel_dpll_hw_state *pll_state,
250 		       enum intel_dpll_id range_min,
251 		       enum intel_dpll_id range_max)
252 {
253 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
254 	struct intel_shared_dpll *pll, *unused_pll = NULL;
255 	struct intel_shared_dpll_state *shared_dpll;
256 	enum intel_dpll_id i;
257 
258 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
259 
260 	for (i = range_min; i <= range_max; i++) {
261 		pll = &dev_priv->shared_dplls[i];
262 
263 		/* Skip unused PLLs here, but remember the first one as a fallback */
264 		if (shared_dpll[i].crtc_mask == 0) {
265 			if (!unused_pll)
266 				unused_pll = pll;
267 			continue;
268 		}
269 
270 		if (memcmp(pll_state,
271 			   &shared_dpll[i].hw_state,
272 			   sizeof(*pll_state)) == 0) {
273 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
274 				      crtc->base.base.id, crtc->base.name,
275 				      pll->info->name,
276 				      shared_dpll[i].crtc_mask,
277 				      pll->active_mask);
278 			return pll;
279 		}
280 	}
281 
282 	/* Ok no matching timings, maybe there's a free one? */
283 	if (unused_pll) {
284 		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
285 			      crtc->base.base.id, crtc->base.name,
286 			      unused_pll->info->name);
287 		return unused_pll;
288 	}
289 
290 	return NULL;
291 }
292 
293 static void
294 intel_reference_shared_dpll(struct intel_atomic_state *state,
295 			    const struct intel_crtc *crtc,
296 			    const struct intel_shared_dpll *pll,
297 			    const struct intel_dpll_hw_state *pll_state)
298 {
299 	struct intel_shared_dpll_state *shared_dpll;
300 	const enum intel_dpll_id id = pll->info->id;
301 
302 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
303 
304 	if (shared_dpll[id].crtc_mask == 0)
305 		shared_dpll[id].hw_state = *pll_state;
306 
307 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
308 			 pipe_name(crtc->pipe));
309 
310 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
311 }
312 
313 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
314 					  const struct intel_crtc *crtc,
315 					  const struct intel_shared_dpll *pll)
316 {
317 	struct intel_shared_dpll_state *shared_dpll;
318 
319 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
320 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
321 }
322 
323 static void intel_put_dpll(struct intel_atomic_state *state,
324 			   struct intel_crtc *crtc)
325 {
326 	const struct intel_crtc_state *old_crtc_state =
327 		intel_atomic_get_old_crtc_state(state, crtc);
328 	struct intel_crtc_state *new_crtc_state =
329 		intel_atomic_get_new_crtc_state(state, crtc);
330 
331 	new_crtc_state->shared_dpll = NULL;
332 
333 	if (!old_crtc_state->shared_dpll)
334 		return;
335 
336 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
337 }
338 
339 /**
340  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
341  * @state: atomic state
342  *
343  * This is the dpll version of drm_atomic_helper_swap_state() since the
344  * helper does not handle driver-specific global state.
345  *
346  * For consistency with atomic helpers this function does a complete swap,
347  * i.e. it also puts the current state into @state, even though there is no
348  * need for that at this moment.
349  */
350 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
351 {
352 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
353 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
354 	enum intel_dpll_id i;
355 
356 	if (!state->dpll_set)
357 		return;
358 
359 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
360 		struct intel_shared_dpll *pll =
361 			&dev_priv->shared_dplls[i];
362 
363 		swap(pll->state, shared_dpll[i]);
364 	}
365 }
366 
367 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
368 				      struct intel_shared_dpll *pll,
369 				      struct intel_dpll_hw_state *hw_state)
370 {
371 	const enum intel_dpll_id id = pll->info->id;
372 	intel_wakeref_t wakeref;
373 	u32 val;
374 
375 	wakeref = intel_display_power_get_if_enabled(dev_priv,
376 						     POWER_DOMAIN_DISPLAY_CORE);
377 	if (!wakeref)
378 		return false;
379 
380 	val = I915_READ(PCH_DPLL(id));
381 	hw_state->dpll = val;
382 	hw_state->fp0 = I915_READ(PCH_FP0(id));
383 	hw_state->fp1 = I915_READ(PCH_FP1(id));
384 
385 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
386 
387 	return val & DPLL_VCO_ENABLE;
388 }
389 
390 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
391 				 struct intel_shared_dpll *pll)
392 {
393 	const enum intel_dpll_id id = pll->info->id;
394 
395 	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
396 	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
397 }
398 
399 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
400 {
401 	u32 val;
402 	bool enabled;
403 
404 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
405 
406 	val = I915_READ(PCH_DREF_CONTROL);
407 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
408 			    DREF_SUPERSPREAD_SOURCE_MASK));
409 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
410 }
411 
412 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
413 				struct intel_shared_dpll *pll)
414 {
415 	const enum intel_dpll_id id = pll->info->id;
416 
417 	/* PCH refclock must be enabled first */
418 	ibx_assert_pch_refclk_enabled(dev_priv);
419 
420 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
421 
422 	/* Wait for the clocks to stabilize. */
423 	POSTING_READ(PCH_DPLL(id));
424 	udelay(150);
425 
426 	/* The pixel multiplier can only be updated once the
427 	 * DPLL is enabled and the clocks are stable.
428 	 *
429 	 * So write it again.
430 	 */
431 	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
432 	POSTING_READ(PCH_DPLL(id));
433 	udelay(200);
434 }
435 
436 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
437 				 struct intel_shared_dpll *pll)
438 {
439 	const enum intel_dpll_id id = pll->info->id;
440 
441 	I915_WRITE(PCH_DPLL(id), 0);
442 	POSTING_READ(PCH_DPLL(id));
443 	udelay(200);
444 }
445 
446 static bool ibx_get_dpll(struct intel_atomic_state *state,
447 			 struct intel_crtc *crtc,
448 			 struct intel_encoder *encoder)
449 {
450 	struct intel_crtc_state *crtc_state =
451 		intel_atomic_get_new_crtc_state(state, crtc);
452 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
453 	struct intel_shared_dpll *pll;
454 	enum intel_dpll_id i;
455 
456 	if (HAS_PCH_IBX(dev_priv)) {
457 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
458 		i = (enum intel_dpll_id) crtc->pipe;
459 		pll = &dev_priv->shared_dplls[i];
460 
461 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
462 			      crtc->base.base.id, crtc->base.name,
463 			      pll->info->name);
464 	} else {
465 		pll = intel_find_shared_dpll(state, crtc,
466 					     &crtc_state->dpll_hw_state,
467 					     DPLL_ID_PCH_PLL_A,
468 					     DPLL_ID_PCH_PLL_B);
469 	}
470 
471 	if (!pll)
472 		return false;
473 
474 	/* reference the pll */
475 	intel_reference_shared_dpll(state, crtc,
476 				    pll, &crtc_state->dpll_hw_state);
477 
478 	crtc_state->shared_dpll = pll;
479 
480 	return true;
481 }
482 
483 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
484 			      const struct intel_dpll_hw_state *hw_state)
485 {
486 	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
487 		      "fp0: 0x%x, fp1: 0x%x\n",
488 		      hw_state->dpll,
489 		      hw_state->dpll_md,
490 		      hw_state->fp0,
491 		      hw_state->fp1);
492 }
493 
494 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
495 	.prepare = ibx_pch_dpll_prepare,
496 	.enable = ibx_pch_dpll_enable,
497 	.disable = ibx_pch_dpll_disable,
498 	.get_hw_state = ibx_pch_dpll_get_hw_state,
499 };
500 
501 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
502 			       struct intel_shared_dpll *pll)
503 {
504 	const enum intel_dpll_id id = pll->info->id;
505 
506 	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
507 	POSTING_READ(WRPLL_CTL(id));
508 	udelay(20);
509 }
510 
511 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
512 				struct intel_shared_dpll *pll)
513 {
514 	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
515 	POSTING_READ(SPLL_CTL);
516 	udelay(20);
517 }
518 
519 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
520 				  struct intel_shared_dpll *pll)
521 {
522 	const enum intel_dpll_id id = pll->info->id;
523 	u32 val;
524 
525 	val = I915_READ(WRPLL_CTL(id));
526 	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
527 	POSTING_READ(WRPLL_CTL(id));
528 }
529 
530 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
531 				 struct intel_shared_dpll *pll)
532 {
533 	u32 val;
534 
535 	val = I915_READ(SPLL_CTL);
536 	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
537 	POSTING_READ(SPLL_CTL);
538 }
539 
540 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
541 				       struct intel_shared_dpll *pll,
542 				       struct intel_dpll_hw_state *hw_state)
543 {
544 	const enum intel_dpll_id id = pll->info->id;
545 	intel_wakeref_t wakeref;
546 	u32 val;
547 
548 	wakeref = intel_display_power_get_if_enabled(dev_priv,
549 						     POWER_DOMAIN_DISPLAY_CORE);
550 	if (!wakeref)
551 		return false;
552 
553 	val = I915_READ(WRPLL_CTL(id));
554 	hw_state->wrpll = val;
555 
556 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
557 
558 	return val & WRPLL_PLL_ENABLE;
559 }
560 
561 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
562 				      struct intel_shared_dpll *pll,
563 				      struct intel_dpll_hw_state *hw_state)
564 {
565 	intel_wakeref_t wakeref;
566 	u32 val;
567 
568 	wakeref = intel_display_power_get_if_enabled(dev_priv,
569 						     POWER_DOMAIN_DISPLAY_CORE);
570 	if (!wakeref)
571 		return false;
572 
573 	val = I915_READ(SPLL_CTL);
574 	hw_state->spll = val;
575 
576 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
577 
578 	return val & SPLL_PLL_ENABLE;
579 }
580 
581 #define LC_FREQ 2700
582 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
583 
584 #define P_MIN 2
585 #define P_MAX 64
586 #define P_INC 2
587 
588 /* Constraints for PLL good behavior */
589 #define REF_MIN 48
590 #define REF_MAX 400
591 #define VCO_MIN 2400
592 #define VCO_MAX 4800
593 
594 struct hsw_wrpll_rnp {
595 	unsigned p, n2, r2;
596 };
597 
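/*
 * Frequency error budget, in ppm, used when picking WRPLL dividers for a
 * given pixel clock. A budget of 0 makes the search simply minimize the
 * deviation; most other rates get the default 1000 ppm.
 */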
598 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
599 {
600 	unsigned budget;
601 
602 	switch (clock) {
603 	case 25175000:
604 	case 25200000:
605 	case 27000000:
606 	case 27027000:
607 	case 37762500:
608 	case 37800000:
609 	case 40500000:
610 	case 40541000:
611 	case 54000000:
612 	case 54054000:
613 	case 59341000:
614 	case 59400000:
615 	case 72000000:
616 	case 74176000:
617 	case 74250000:
618 	case 81000000:
619 	case 81081000:
620 	case 89012000:
621 	case 89100000:
622 	case 108000000:
623 	case 108108000:
624 	case 111264000:
625 	case 111375000:
626 	case 148352000:
627 	case 148500000:
628 	case 162000000:
629 	case 162162000:
630 	case 222525000:
631 	case 222750000:
632 	case 296703000:
633 	case 297000000:
634 		budget = 0;
635 		break;
636 	case 233500000:
637 	case 245250000:
638 	case 247750000:
639 	case 253250000:
640 	case 298000000:
641 		budget = 1500;
642 		break;
643 	case 169128000:
644 	case 169500000:
645 	case 179500000:
646 	case 202000000:
647 		budget = 2000;
648 		break;
649 	case 256250000:
650 	case 262500000:
651 	case 270000000:
652 	case 272500000:
653 	case 273750000:
654 	case 280750000:
655 	case 281250000:
656 	case 286000000:
657 	case 291750000:
658 		budget = 4000;
659 		break;
660 	case 267250000:
661 	case 268500000:
662 		budget = 5000;
663 		break;
664 	default:
665 		budget = 1000;
666 		break;
667 	}
668 
669 	return budget;
670 }
671 
672 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
673 				 unsigned int r2, unsigned int n2,
674 				 unsigned int p,
675 				 struct hsw_wrpll_rnp *best)
676 {
677 	u64 a, b, c, d, diff, diff_best;
678 
679 	/* No best (r,n,p) yet */
680 	if (best->p == 0) {
681 		best->p = p;
682 		best->n2 = n2;
683 		best->r2 = r2;
684 		return;
685 	}
686 
687 	/*
688 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
689 	 * freq2k.
690 	 *
691 	 * delta = 1e6 *
692 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
693 	 *	   freq2k;
694 	 *
695 	 * and we would like delta <= budget.
696 	 *
697 	 * If the discrepancy is above the PPM-based budget, always prefer to
698 	 * improve upon the previous solution.  However, if you're within the
699 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
700 	 */
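	/*
	 * To avoid divisions, the budget check is done on cross-multiplied
	 * terms: "a >= c" below is "delta <= budget" for the candidate
	 * (r2, n2, p), and "b >= d" is the same check for the current best.
	 */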
701 	a = freq2k * budget * p * r2;
702 	b = freq2k * budget * best->p * best->r2;
703 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
704 	diff_best = abs_diff(freq2k * best->p * best->r2,
705 			     LC_FREQ_2K * best->n2);
706 	c = 1000000 * diff;
707 	d = 1000000 * diff_best;
708 
709 	if (a < c && b < d) {
710 		/* If both are above the budget, pick the closer */
711 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
712 			best->p = p;
713 			best->n2 = n2;
714 			best->r2 = r2;
715 		}
716 	} else if (a >= c && b < d) {
717 		/* The candidate is within budget but the current best is not: update. */
718 		best->p = p;
719 		best->n2 = n2;
720 		best->r2 = r2;
721 	} else if (a >= c && b >= d) {
722 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
723 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
724 			best->p = p;
725 			best->n2 = n2;
726 			best->r2 = r2;
727 		}
728 	}
729 	/* Otherwise a < c && b >= d, do nothing */
730 }
731 
732 static void
733 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
734 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
735 {
736 	u64 freq2k;
737 	unsigned p, n2, r2;
738 	struct hsw_wrpll_rnp best = { 0, 0, 0 };
739 	unsigned budget;
740 
741 	freq2k = clock / 100;
742 
743 	budget = hsw_wrpll_get_budget_for_freq(clock);
744 
745 	/* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
746 	 * entirely and use the LC PLL output directly. */
747 	if (freq2k == 5400000) {
748 		*n2_out = 2;
749 		*p_out = 1;
750 		*r2_out = 2;
751 		return;
752 	}
753 
754 	/*
755 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
756 	 * the WR PLL.
757 	 *
758 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
759 	 * Injecting R2 = 2 * R gives:
760 	 *   REF_MAX * r2 > LC_FREQ * 2 and
761 	 *   REF_MIN * r2 < LC_FREQ * 2
762 	 *
763 	 * Which means the desired boundaries for r2 are:
764 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
765 	 *
766 	 */
767 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
768 	     r2 <= LC_FREQ * 2 / REF_MIN;
769 	     r2++) {
770 
771 		/*
772 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
773 		 *
774 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
775 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
776 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
777 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
778 		 *
779 		 * Which means the desired boundaries for n2 are:
780 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
781 		 */
782 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
783 		     n2 <= VCO_MAX * r2 / LC_FREQ;
784 		     n2++) {
785 
786 			for (p = P_MIN; p <= P_MAX; p += P_INC)
787 				hsw_wrpll_update_rnp(freq2k, budget,
788 						     r2, n2, p, &best);
789 		}
790 	}
791 
792 	*n2_out = best.n2;
793 	*p_out = best.p;
794 	*r2_out = best.r2;
795 }
796 
797 static struct intel_shared_dpll *
798 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
799 		      struct intel_crtc *crtc)
800 {
801 	struct intel_crtc_state *crtc_state =
802 		intel_atomic_get_new_crtc_state(state, crtc);
803 	struct intel_shared_dpll *pll;
804 	u32 val;
805 	unsigned int p, n2, r2;
806 
807 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
808 
809 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
810 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
811 	      WRPLL_DIVIDER_POST(p);
812 
813 	crtc_state->dpll_hw_state.wrpll = val;
814 
815 	pll = intel_find_shared_dpll(state, crtc,
816 				     &crtc_state->dpll_hw_state,
817 				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
818 
819 	if (!pll)
820 		return NULL;
821 
822 	return pll;
823 }
824 
825 static struct intel_shared_dpll *
826 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
827 {
828 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
829 	struct intel_shared_dpll *pll;
830 	enum intel_dpll_id pll_id;
831 	int clock = crtc_state->port_clock;
832 
833 	switch (clock / 2) {
834 	case 81000:
835 		pll_id = DPLL_ID_LCPLL_810;
836 		break;
837 	case 135000:
838 		pll_id = DPLL_ID_LCPLL_1350;
839 		break;
840 	case 270000:
841 		pll_id = DPLL_ID_LCPLL_2700;
842 		break;
843 	default:
844 		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
845 		return NULL;
846 	}
847 
848 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
849 
850 	if (!pll)
851 		return NULL;
852 
853 	return pll;
854 }
855 
856 static bool hsw_get_dpll(struct intel_atomic_state *state,
857 			 struct intel_crtc *crtc,
858 			 struct intel_encoder *encoder)
859 {
860 	struct intel_crtc_state *crtc_state =
861 		intel_atomic_get_new_crtc_state(state, crtc);
862 	struct intel_shared_dpll *pll;
863 
864 	memset(&crtc_state->dpll_hw_state, 0,
865 	       sizeof(crtc_state->dpll_hw_state));
866 
867 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
868 		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
869 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
870 		pll = hsw_ddi_dp_get_dpll(crtc_state);
871 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
872 		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
873 			return false;
874 
875 		crtc_state->dpll_hw_state.spll =
876 			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
877 
878 		pll = intel_find_shared_dpll(state, crtc,
879 					     &crtc_state->dpll_hw_state,
880 					     DPLL_ID_SPLL, DPLL_ID_SPLL);
881 	} else {
882 		return false;
883 	}
884 
885 	if (!pll)
886 		return false;
887 
888 	intel_reference_shared_dpll(state, crtc,
889 				    pll, &crtc_state->dpll_hw_state);
890 
891 	crtc_state->shared_dpll = pll;
892 
893 	return true;
894 }
895 
896 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
897 			      const struct intel_dpll_hw_state *hw_state)
898 {
899 	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
900 		      hw_state->wrpll, hw_state->spll);
901 }
902 
903 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
904 	.enable = hsw_ddi_wrpll_enable,
905 	.disable = hsw_ddi_wrpll_disable,
906 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
907 };
908 
909 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
910 	.enable = hsw_ddi_spll_enable,
911 	.disable = hsw_ddi_spll_disable,
912 	.get_hw_state = hsw_ddi_spll_get_hw_state,
913 };
914 
915 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
916 				 struct intel_shared_dpll *pll)
917 {
918 }
919 
920 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
921 				  struct intel_shared_dpll *pll)
922 {
923 }
924 
925 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
926 				       struct intel_shared_dpll *pll,
927 				       struct intel_dpll_hw_state *hw_state)
928 {
929 	return true;
930 }
931 
932 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
933 	.enable = hsw_ddi_lcpll_enable,
934 	.disable = hsw_ddi_lcpll_disable,
935 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
936 };
937 
938 struct skl_dpll_regs {
939 	i915_reg_t ctl, cfgcr1, cfgcr2;
940 };
941 
942 /* this array is indexed by the *shared* pll id */
943 static const struct skl_dpll_regs skl_dpll_regs[4] = {
944 	{
945 		/* DPLL 0 */
946 		.ctl = LCPLL1_CTL,
947 		/* DPLL 0 doesn't support HDMI mode */
948 	},
949 	{
950 		/* DPLL 1 */
951 		.ctl = LCPLL2_CTL,
952 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
953 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
954 	},
955 	{
956 		/* DPLL 2 */
957 		.ctl = WRPLL_CTL(0),
958 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
959 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
960 	},
961 	{
962 		/* DPLL 3 */
963 		.ctl = WRPLL_CTL(1),
964 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
965 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
966 	},
967 };
968 
969 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
970 				    struct intel_shared_dpll *pll)
971 {
972 	const enum intel_dpll_id id = pll->info->id;
973 	u32 val;
974 
975 	val = I915_READ(DPLL_CTRL1);
976 
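	/*
	 * Each DPLL owns a 6-bit field in DPLL_CTRL1; hw_state.ctrl1 holds
	 * that field's value for DPLL 0, hence the id * 6 shift here.
	 */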
977 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
978 		 DPLL_CTRL1_SSC(id) |
979 		 DPLL_CTRL1_LINK_RATE_MASK(id));
980 	val |= pll->state.hw_state.ctrl1 << (id * 6);
981 
982 	I915_WRITE(DPLL_CTRL1, val);
983 	POSTING_READ(DPLL_CTRL1);
984 }
985 
986 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
987 			       struct intel_shared_dpll *pll)
988 {
989 	const struct skl_dpll_regs *regs = skl_dpll_regs;
990 	const enum intel_dpll_id id = pll->info->id;
991 
992 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
993 
994 	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
995 	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
996 	POSTING_READ(regs[id].cfgcr1);
997 	POSTING_READ(regs[id].cfgcr2);
998 
999 	/* the enable bit is always bit 31 */
1000 	I915_WRITE(regs[id].ctl,
1001 		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
1002 
1003 	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1004 		DRM_ERROR("DPLL %d not locked\n", id);
1005 }
1006 
1007 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1008 				 struct intel_shared_dpll *pll)
1009 {
1010 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1011 }
1012 
1013 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1014 				struct intel_shared_dpll *pll)
1015 {
1016 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1017 	const enum intel_dpll_id id = pll->info->id;
1018 
1019 	/* the enable bit is always bit 31 */
1020 	I915_WRITE(regs[id].ctl,
1021 		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1022 	POSTING_READ(regs[id].ctl);
1023 }
1024 
1025 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1026 				  struct intel_shared_dpll *pll)
1027 {
1028 }
1029 
1030 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1031 				     struct intel_shared_dpll *pll,
1032 				     struct intel_dpll_hw_state *hw_state)
1033 {
1034 	u32 val;
1035 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1036 	const enum intel_dpll_id id = pll->info->id;
1037 	intel_wakeref_t wakeref;
1038 	bool ret;
1039 
1040 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1041 						     POWER_DOMAIN_DISPLAY_CORE);
1042 	if (!wakeref)
1043 		return false;
1044 
1045 	ret = false;
1046 
1047 	val = I915_READ(regs[id].ctl);
1048 	if (!(val & LCPLL_PLL_ENABLE))
1049 		goto out;
1050 
1051 	val = I915_READ(DPLL_CTRL1);
1052 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1053 
1054 	/* avoid reading back stale values if HDMI mode is not enabled */
1055 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1056 		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1057 		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1058 	}
1059 	ret = true;
1060 
1061 out:
1062 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1063 
1064 	return ret;
1065 }
1066 
1067 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1068 				       struct intel_shared_dpll *pll,
1069 				       struct intel_dpll_hw_state *hw_state)
1070 {
1071 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1072 	const enum intel_dpll_id id = pll->info->id;
1073 	intel_wakeref_t wakeref;
1074 	u32 val;
1075 	bool ret;
1076 
1077 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1078 						     POWER_DOMAIN_DISPLAY_CORE);
1079 	if (!wakeref)
1080 		return false;
1081 
1082 	ret = false;
1083 
1084 	/* DPLL0 is always enabled since it drives CDCLK */
1085 	val = I915_READ(regs[id].ctl);
1086 	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1087 		goto out;
1088 
1089 	val = I915_READ(DPLL_CTRL1);
1090 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1091 
1092 	ret = true;
1093 
1094 out:
1095 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1096 
1097 	return ret;
1098 }
1099 
1100 struct skl_wrpll_context {
1101 	u64 min_deviation;		/* current minimal deviation */
1102 	u64 central_freq;		/* chosen central freq */
1103 	u64 dco_freq;			/* chosen dco freq */
1104 	unsigned int p;			/* chosen divider */
1105 };
1106 
1107 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1108 {
1109 	memset(ctx, 0, sizeof(*ctx));
1110 
1111 	ctx->min_deviation = U64_MAX;
1112 }
1113 
1114 /* DCO freq must be within +1%/-6% of the DCO central freq */
1115 #define SKL_DCO_MAX_PDEVIATION	100
1116 #define SKL_DCO_MAX_NDEVIATION	600
1117 
1118 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1119 				  u64 central_freq,
1120 				  u64 dco_freq,
1121 				  unsigned int divider)
1122 {
1123 	u64 deviation;
1124 
1125 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1126 			      central_freq);
1127 
1128 	/* positive deviation */
1129 	if (dco_freq >= central_freq) {
1130 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1131 		    deviation < ctx->min_deviation) {
1132 			ctx->min_deviation = deviation;
1133 			ctx->central_freq = central_freq;
1134 			ctx->dco_freq = dco_freq;
1135 			ctx->p = divider;
1136 		}
1137 	/* negative deviation */
1138 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1139 		   deviation < ctx->min_deviation) {
1140 		ctx->min_deviation = deviation;
1141 		ctx->central_freq = central_freq;
1142 		ctx->dco_freq = dco_freq;
1143 		ctx->p = divider;
1144 	}
1145 }
1146 
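/*
 * Split the overall divider p into the three cascaded hardware dividers
 * p0 (pdiv), p1 (qdiv) and p2 (kdiv) such that p = p0 * p1 * p2, using only
 * the ratios the hardware supports.
 */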
1147 static void skl_wrpll_get_multipliers(unsigned int p,
1148 				      unsigned int *p0 /* out */,
1149 				      unsigned int *p1 /* out */,
1150 				      unsigned int *p2 /* out */)
1151 {
1152 	/* even dividers */
1153 	if (p % 2 == 0) {
1154 		unsigned int half = p / 2;
1155 
1156 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1157 			*p0 = 2;
1158 			*p1 = 1;
1159 			*p2 = half;
1160 		} else if (half % 2 == 0) {
1161 			*p0 = 2;
1162 			*p1 = half / 2;
1163 			*p2 = 2;
1164 		} else if (half % 3 == 0) {
1165 			*p0 = 3;
1166 			*p1 = half / 3;
1167 			*p2 = 2;
1168 		} else if (half % 7 == 0) {
1169 			*p0 = 7;
1170 			*p1 = half / 7;
1171 			*p2 = 2;
1172 		}
1173 	} else if (p == 3 || p == 9) {  /* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
1174 		*p0 = 3;
1175 		*p1 = 1;
1176 		*p2 = p / 3;
1177 	} else if (p == 5 || p == 7) {
1178 		*p0 = p;
1179 		*p1 = 1;
1180 		*p2 = 1;
1181 	} else if (p == 15) {
1182 		*p0 = 3;
1183 		*p1 = 1;
1184 		*p2 = 5;
1185 	} else if (p == 21) {
1186 		*p0 = 7;
1187 		*p1 = 1;
1188 		*p2 = 3;
1189 	} else if (p == 35) {
1190 		*p0 = 7;
1191 		*p1 = 1;
1192 		*p2 = 5;
1193 	}
1194 }
1195 
1196 struct skl_wrpll_params {
1197 	u32 dco_fraction;
1198 	u32 dco_integer;
1199 	u32 qdiv_ratio;
1200 	u32 qdiv_mode;
1201 	u32 kdiv;
1202 	u32 pdiv;
1203 	u32 central_freq;
1204 };
1205 
1206 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1207 				      u64 afe_clock,
1208 				      u64 central_freq,
1209 				      u32 p0, u32 p1, u32 p2)
1210 {
1211 	u64 dco_freq;
1212 
1213 	switch (central_freq) {
1214 	case 9600000000ULL:
1215 		params->central_freq = 0;
1216 		break;
1217 	case 9000000000ULL:
1218 		params->central_freq = 1;
1219 		break;
1220 	case 8400000000ULL:
1221 		params->central_freq = 3;
1222 	}
1223 
1224 	switch (p0) {
1225 	case 1:
1226 		params->pdiv = 0;
1227 		break;
1228 	case 2:
1229 		params->pdiv = 1;
1230 		break;
1231 	case 3:
1232 		params->pdiv = 2;
1233 		break;
1234 	case 7:
1235 		params->pdiv = 4;
1236 		break;
1237 	default:
1238 		WARN(1, "Incorrect PDiv\n");
1239 	}
1240 
1241 	switch (p2) {
1242 	case 5:
1243 		params->kdiv = 0;
1244 		break;
1245 	case 2:
1246 		params->kdiv = 1;
1247 		break;
1248 	case 3:
1249 		params->kdiv = 2;
1250 		break;
1251 	case 1:
1252 		params->kdiv = 3;
1253 		break;
1254 	default:
1255 		WARN(1, "Incorrect KDiv\n");
1256 	}
1257 
1258 	params->qdiv_ratio = p1;
1259 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1260 
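	/*
	 * The AFE clock is the DCO output divided by p0 * p1 * p2, so
	 * multiplying back recovers the DCO frequency.
	 */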
1261 	dco_freq = p0 * p1 * p2 * afe_clock;
1262 
1263 	/*
1264 	 * Intermediate values are in Hz.
1265 	 * Divide by MHz to match bspec
1266 	 */
1267 	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1268 	params->dco_fraction =
1269 		div_u64((div_u64(dco_freq, 24) -
1270 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1271 }
1272 
1273 static bool
1274 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1275 			struct skl_wrpll_params *wrpll_params)
1276 {
1277 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1278 	u64 dco_central_freq[3] = { 8400000000ULL,
1279 				    9000000000ULL,
1280 				    9600000000ULL };
1281 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1282 					     24, 28, 30, 32, 36, 40, 42, 44,
1283 					     48, 52, 54, 56, 60, 64, 66, 68,
1284 					     70, 72, 76, 78, 80, 84, 88, 90,
1285 					     92, 96, 98 };
1286 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1287 	static const struct {
1288 		const int *list;
1289 		int n_dividers;
1290 	} dividers[] = {
1291 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1292 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1293 	};
1294 	struct skl_wrpll_context ctx;
1295 	unsigned int dco, d, i;
1296 	unsigned int p0, p1, p2;
1297 
1298 	skl_wrpll_context_init(&ctx);
1299 
1300 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1301 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1302 			for (i = 0; i < dividers[d].n_dividers; i++) {
1303 				unsigned int p = dividers[d].list[i];
1304 				u64 dco_freq = p * afe_clock;
1305 
1306 				skl_wrpll_try_divider(&ctx,
1307 						      dco_central_freq[dco],
1308 						      dco_freq,
1309 						      p);
1310 				/*
1311 				 * Skip the remaining dividers if we're sure to
1312 				 * have found the definitive divider, we can't
1313 				 * improve a 0 deviation.
1314 				 */
1315 				if (ctx.min_deviation == 0)
1316 					goto skip_remaining_dividers;
1317 			}
1318 		}
1319 
1320 skip_remaining_dividers:
1321 		/*
1322 		 * If a solution is found with an even divider, prefer
1323 		 * this one.
1324 		 */
1325 		if (d == 0 && ctx.p)
1326 			break;
1327 	}
1328 
1329 	if (!ctx.p) {
1330 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1331 		return false;
1332 	}
1333 
1334 	/*
1335 	 * gcc incorrectly warns that these may be used uninitialized.
1336 	 * To be fair, it's hard for it to prove otherwise.
1337 	 */
1338 	p0 = p1 = p2 = 0;
1339 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1340 	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1341 				  p0, p1, p2);
1342 
1343 	return true;
1344 }
1345 
1346 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1347 {
1348 	u32 ctrl1, cfgcr1, cfgcr2;
1349 	struct skl_wrpll_params wrpll_params = { 0, };
1350 
1351 	/*
1352 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1353 	 * as the DPLL id in this function.
1354 	 */
1355 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1356 
1357 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1358 
1359 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1360 				     &wrpll_params))
1361 		return false;
1362 
1363 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1364 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1365 		wrpll_params.dco_integer;
1366 
1367 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1368 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1369 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1370 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1371 		wrpll_params.central_freq;
1372 
1373 	memset(&crtc_state->dpll_hw_state, 0,
1374 	       sizeof(crtc_state->dpll_hw_state));
1375 
1376 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1377 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1378 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1379 	return true;
1380 }
1381 
1382 static bool
1383 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1384 {
1385 	u32 ctrl1;
1386 
1387 	/*
1388 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1389 	 * as the DPLL id in this function.
1390 	 */
1391 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1392 	switch (crtc_state->port_clock / 2) {
1393 	case 81000:
1394 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1395 		break;
1396 	case 135000:
1397 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1398 		break;
1399 	case 270000:
1400 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1401 		break;
1402 		/* eDP 1.4 rates */
1403 	case 162000:
1404 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1405 		break;
1406 	case 108000:
1407 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1408 		break;
1409 	case 216000:
1410 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1411 		break;
1412 	}
1413 
1414 	memset(&crtc_state->dpll_hw_state, 0,
1415 	       sizeof(crtc_state->dpll_hw_state));
1416 
1417 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1418 
1419 	return true;
1420 }
1421 
1422 static bool skl_get_dpll(struct intel_atomic_state *state,
1423 			 struct intel_crtc *crtc,
1424 			 struct intel_encoder *encoder)
1425 {
1426 	struct intel_crtc_state *crtc_state =
1427 		intel_atomic_get_new_crtc_state(state, crtc);
1428 	struct intel_shared_dpll *pll;
1429 	bool bret;
1430 
1431 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1432 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1433 		if (!bret) {
1434 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1435 			return false;
1436 		}
1437 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1438 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1439 		if (!bret) {
1440 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1441 			return false;
1442 		}
1443 	} else {
1444 		return false;
1445 	}
1446 
1447 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1448 		pll = intel_find_shared_dpll(state, crtc,
1449 					     &crtc_state->dpll_hw_state,
1450 					     DPLL_ID_SKL_DPLL0,
1451 					     DPLL_ID_SKL_DPLL0);
1452 	else
1453 		pll = intel_find_shared_dpll(state, crtc,
1454 					     &crtc_state->dpll_hw_state,
1455 					     DPLL_ID_SKL_DPLL1,
1456 					     DPLL_ID_SKL_DPLL3);
1457 	if (!pll)
1458 		return false;
1459 
1460 	intel_reference_shared_dpll(state, crtc,
1461 				    pll, &crtc_state->dpll_hw_state);
1462 
1463 	crtc_state->shared_dpll = pll;
1464 
1465 	return true;
1466 }
1467 
1468 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1469 			      const struct intel_dpll_hw_state *hw_state)
1470 {
1471 	DRM_DEBUG_KMS("dpll_hw_state: "
1472 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1473 		      hw_state->ctrl1,
1474 		      hw_state->cfgcr1,
1475 		      hw_state->cfgcr2);
1476 }
1477 
1478 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1479 	.enable = skl_ddi_pll_enable,
1480 	.disable = skl_ddi_pll_disable,
1481 	.get_hw_state = skl_ddi_pll_get_hw_state,
1482 };
1483 
1484 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1485 	.enable = skl_ddi_dpll0_enable,
1486 	.disable = skl_ddi_dpll0_disable,
1487 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1488 };
1489 
1490 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1491 				struct intel_shared_dpll *pll)
1492 {
1493 	u32 temp;
1494 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1495 	enum dpio_phy phy;
1496 	enum dpio_channel ch;
1497 
1498 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1499 
1500 	/* Non-SSC reference */
1501 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1502 	temp |= PORT_PLL_REF_SEL;
1503 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1504 
1505 	if (IS_GEMINILAKE(dev_priv)) {
1506 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1507 		temp |= PORT_PLL_POWER_ENABLE;
1508 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1509 
1510 		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1511 				 PORT_PLL_POWER_STATE), 200))
1512 			DRM_ERROR("Power state not set for PLL:%d\n", port);
1513 	}
1514 
1515 	/* Disable 10 bit clock */
1516 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1517 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1518 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1519 
1520 	/* Write P1 & P2 */
1521 	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1522 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1523 	temp |= pll->state.hw_state.ebb0;
1524 	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1525 
1526 	/* Write M2 integer */
1527 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1528 	temp &= ~PORT_PLL_M2_MASK;
1529 	temp |= pll->state.hw_state.pll0;
1530 	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1531 
1532 	/* Write N */
1533 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1534 	temp &= ~PORT_PLL_N_MASK;
1535 	temp |= pll->state.hw_state.pll1;
1536 	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1537 
1538 	/* Write M2 fraction */
1539 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1540 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1541 	temp |= pll->state.hw_state.pll2;
1542 	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1543 
1544 	/* Write M2 fraction enable */
1545 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1546 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1547 	temp |= pll->state.hw_state.pll3;
1548 	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1549 
1550 	/* Write coeff */
1551 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1552 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1553 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1554 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1555 	temp |= pll->state.hw_state.pll6;
1556 	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1557 
1558 	/* Write calibration val */
1559 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1560 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1561 	temp |= pll->state.hw_state.pll8;
1562 	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1563 
1564 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1565 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1566 	temp |= pll->state.hw_state.pll9;
1567 	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1568 
1569 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1570 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1571 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1572 	temp |= pll->state.hw_state.pll10;
1573 	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1574 
1575 	/* Recalibrate with new settings */
1576 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1577 	temp |= PORT_PLL_RECALIBRATE;
1578 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1579 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1580 	temp |= pll->state.hw_state.ebb4;
1581 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1582 
1583 	/* Enable PLL */
1584 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1585 	temp |= PORT_PLL_ENABLE;
1586 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1587 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1588 
1589 	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1590 			200))
1591 		DRM_ERROR("PLL %d not locked\n", port);
1592 
1593 	if (IS_GEMINILAKE(dev_priv)) {
1594 		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1595 		temp |= DCC_DELAY_RANGE_2;
1596 		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1597 	}
1598 
1599 	/*
1600 	 * While we write to the group register to program all lanes at once,
1601 	 * we can only read back individual lane registers; use lanes 0/1 here.
1602 	 */
1603 	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1604 	temp &= ~LANE_STAGGER_MASK;
1605 	temp &= ~LANESTAGGER_STRAP_OVRD;
1606 	temp |= pll->state.hw_state.pcsdw12;
1607 	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1608 }
1609 
1610 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1611 					struct intel_shared_dpll *pll)
1612 {
1613 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1614 	u32 temp;
1615 
1616 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1617 	temp &= ~PORT_PLL_ENABLE;
1618 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1619 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1620 
1621 	if (IS_GEMINILAKE(dev_priv)) {
1622 		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1623 		temp &= ~PORT_PLL_POWER_ENABLE;
1624 		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1625 
1626 		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1627 				PORT_PLL_POWER_STATE), 200))
1628 			DRM_ERROR("Power state not reset for PLL:%d\n", port);
1629 	}
1630 }
1631 
1632 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1633 					struct intel_shared_dpll *pll,
1634 					struct intel_dpll_hw_state *hw_state)
1635 {
1636 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1637 	intel_wakeref_t wakeref;
1638 	enum dpio_phy phy;
1639 	enum dpio_channel ch;
1640 	u32 val;
1641 	bool ret;
1642 
1643 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1644 
1645 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1646 						     POWER_DOMAIN_DISPLAY_CORE);
1647 	if (!wakeref)
1648 		return false;
1649 
1650 	ret = false;
1651 
1652 	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1653 	if (!(val & PORT_PLL_ENABLE))
1654 		goto out;
1655 
1656 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1657 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1658 
1659 	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1660 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1661 
1662 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1663 	hw_state->pll0 &= PORT_PLL_M2_MASK;
1664 
1665 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1666 	hw_state->pll1 &= PORT_PLL_N_MASK;
1667 
1668 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1669 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1670 
1671 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1672 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1673 
1674 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1675 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1676 			  PORT_PLL_INT_COEFF_MASK |
1677 			  PORT_PLL_GAIN_CTL_MASK;
1678 
1679 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1680 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1681 
1682 	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1683 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1684 
1685 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1686 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1687 			   PORT_PLL_DCO_AMP_MASK;
1688 
1689 	/*
1690 	 * While we write to the group register to program all lanes at once we
1691 	 * can read only lane registers. We configure all lanes the same way, so
1692 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1693 	 */
1694 	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1695 	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1696 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1697 				 hw_state->pcsdw12,
1698 				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1699 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1700 
1701 	ret = true;
1702 
1703 out:
1704 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1705 
1706 	return ret;
1707 }
1708 
1709 /* bxt clock parameters */
1710 struct bxt_clk_div {
1711 	int clock;
1712 	u32 p1;
1713 	u32 p2;
1714 	u32 m2_int;
1715 	u32 m2_frac;
1716 	bool m2_frac_en;
1717 	u32 n;
1718 
1719 	int vco;
1720 };
1721 
1722 /* pre-calculated values for DP linkrates */
1723 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1724 	{162000, 4, 2, 32, 1677722, 1, 1},
1725 	{270000, 4, 1, 27,       0, 0, 1},
1726 	{540000, 2, 1, 27,       0, 0, 1},
1727 	{216000, 3, 2, 32, 1677722, 1, 1},
1728 	{243000, 4, 1, 24, 1258291, 1, 1},
1729 	{324000, 4, 1, 32, 1677722, 1, 1},
1730 	{432000, 3, 1, 32, 1677722, 1, 1}
1731 };
1732 
1733 static bool
1734 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1735 			  struct bxt_clk_div *clk_div)
1736 {
1737 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1738 	struct dpll best_clock;
1739 
1740 	/* Calculate HDMI div */
1741 	/*
1742 	 * FIXME: tie the following calculation into
1743 	 * i9xx_crtc_compute_clock
1744 	 */
1745 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1746 		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1747 				 crtc_state->port_clock,
1748 				 pipe_name(crtc->pipe));
1749 		return false;
1750 	}
1751 
1752 	clk_div->p1 = best_clock.p1;
1753 	clk_div->p2 = best_clock.p2;
1754 	WARN_ON(best_clock.m1 != 2);
1755 	clk_div->n = best_clock.n;
1756 	clk_div->m2_int = best_clock.m2 >> 22;
1757 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1758 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1759 
1760 	clk_div->vco = best_clock.vco;
1761 
1762 	return true;
1763 }
1764 
1765 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1766 				    struct bxt_clk_div *clk_div)
1767 {
1768 	int clock = crtc_state->port_clock;
1769 	int i;
1770 
1771 	*clk_div = bxt_dp_clk_val[0];
1772 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1773 		if (bxt_dp_clk_val[i].clock == clock) {
1774 			*clk_div = bxt_dp_clk_val[i];
1775 			break;
1776 		}
1777 	}
1778 
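	/* VCO = 5 * link rate * p1 * p2, in kHz. */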
1779 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1780 }
1781 
1782 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1783 				      const struct bxt_clk_div *clk_div)
1784 {
1785 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1786 	int clock = crtc_state->port_clock;
1787 	int vco = clk_div->vco;
1788 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1789 	u32 lanestagger;
1790 
1791 	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
1792 
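	/* Pick coefficients, gain and calibration target count for this VCO range. */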
1793 	if (vco >= 6200000 && vco <= 6700000) {
1794 		prop_coef = 4;
1795 		int_coef = 9;
1796 		gain_ctl = 3;
1797 		targ_cnt = 8;
1798 	} else if ((vco > 5400000 && vco < 6200000) ||
1799 			(vco >= 4800000 && vco < 5400000)) {
1800 		prop_coef = 5;
1801 		int_coef = 11;
1802 		gain_ctl = 3;
1803 		targ_cnt = 9;
1804 	} else if (vco == 5400000) {
1805 		prop_coef = 3;
1806 		int_coef = 8;
1807 		gain_ctl = 1;
1808 		targ_cnt = 9;
1809 	} else {
1810 		DRM_ERROR("Invalid VCO\n");
1811 		return false;
1812 	}
1813 
1814 	if (clock > 270000)
1815 		lanestagger = 0x18;
1816 	else if (clock > 135000)
1817 		lanestagger = 0x0d;
1818 	else if (clock > 67000)
1819 		lanestagger = 0x07;
1820 	else if (clock > 33000)
1821 		lanestagger = 0x04;
1822 	else
1823 		lanestagger = 0x02;
1824 
1825 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1826 	dpll_hw_state->pll0 = clk_div->m2_int;
1827 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1828 	dpll_hw_state->pll2 = clk_div->m2_frac;
1829 
1830 	if (clk_div->m2_frac_en)
1831 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1832 
1833 	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1834 	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1835 
1836 	dpll_hw_state->pll8 = targ_cnt;
1837 
1838 	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1839 
1840 	dpll_hw_state->pll10 =
1841 		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1842 		| PORT_PLL_DCO_AMP_OVR_EN_H;
1843 
1844 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1845 
1846 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1847 
1848 	return true;
1849 }
1850 
1851 static bool
1852 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1853 {
1854 	struct bxt_clk_div clk_div = {};
1855 
1856 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1857 
1858 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1859 }
1860 
1861 static bool
1862 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1863 {
1864 	struct bxt_clk_div clk_div = {};
1865 
1866 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1867 
1868 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1869 }
1870 
1871 static bool bxt_get_dpll(struct intel_atomic_state *state,
1872 			 struct intel_crtc *crtc,
1873 			 struct intel_encoder *encoder)
1874 {
1875 	struct intel_crtc_state *crtc_state =
1876 		intel_atomic_get_new_crtc_state(state, crtc);
1877 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1878 	struct intel_shared_dpll *pll;
1879 	enum intel_dpll_id id;
1880 
1881 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1882 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1883 		return false;
1884 
1885 	if (intel_crtc_has_dp_encoder(crtc_state) &&
1886 	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1887 		return false;
1888 
1889 	/* 1:1 mapping between ports and PLLs */
1890 	id = (enum intel_dpll_id) encoder->port;
1891 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
1892 
1893 	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1894 		      crtc->base.base.id, crtc->base.name, pll->info->name);
1895 
1896 	intel_reference_shared_dpll(state, crtc,
1897 				    pll, &crtc_state->dpll_hw_state);
1898 
1899 	crtc_state->shared_dpll = pll;
1900 
1901 	return true;
1902 }
1903 
1904 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1905 			      const struct intel_dpll_hw_state *hw_state)
1906 {
1907 	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
1908 		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1909 		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1910 		      hw_state->ebb0,
1911 		      hw_state->ebb4,
1912 		      hw_state->pll0,
1913 		      hw_state->pll1,
1914 		      hw_state->pll2,
1915 		      hw_state->pll3,
1916 		      hw_state->pll6,
1917 		      hw_state->pll8,
1918 		      hw_state->pll9,
1919 		      hw_state->pll10,
1920 		      hw_state->pcsdw12);
1921 }
1922 
1923 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1924 	.enable = bxt_ddi_pll_enable,
1925 	.disable = bxt_ddi_pll_disable,
1926 	.get_hw_state = bxt_ddi_pll_get_hw_state,
1927 };
1928 
1929 struct intel_dpll_mgr {
1930 	const struct dpll_info *dpll_info;
1931 
1932 	bool (*get_dplls)(struct intel_atomic_state *state,
1933 			  struct intel_crtc *crtc,
1934 			  struct intel_encoder *encoder);
1935 	void (*put_dplls)(struct intel_atomic_state *state,
1936 			  struct intel_crtc *crtc);
1937 	void (*update_active_dpll)(struct intel_atomic_state *state,
1938 				   struct intel_crtc *crtc,
1939 				   struct intel_encoder *encoder);
1940 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1941 			      const struct intel_dpll_hw_state *hw_state);
1942 };
1943 
1944 static const struct dpll_info pch_plls[] = {
1945 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1946 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1947 	{ },
1948 };
1949 
1950 static const struct intel_dpll_mgr pch_pll_mgr = {
1951 	.dpll_info = pch_plls,
1952 	.get_dplls = ibx_get_dpll,
1953 	.put_dplls = intel_put_dpll,
1954 	.dump_hw_state = ibx_dump_hw_state,
1955 };
1956 
1957 static const struct dpll_info hsw_plls[] = {
1958 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1959 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1960 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1961 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1962 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1963 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1964 	{ },
1965 };
1966 
1967 static const struct intel_dpll_mgr hsw_pll_mgr = {
1968 	.dpll_info = hsw_plls,
1969 	.get_dplls = hsw_get_dpll,
1970 	.put_dplls = intel_put_dpll,
1971 	.dump_hw_state = hsw_dump_hw_state,
1972 };
1973 
1974 static const struct dpll_info skl_plls[] = {
1975 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1976 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1977 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1978 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1979 	{ },
1980 };
1981 
1982 static const struct intel_dpll_mgr skl_pll_mgr = {
1983 	.dpll_info = skl_plls,
1984 	.get_dplls = skl_get_dpll,
1985 	.put_dplls = intel_put_dpll,
1986 	.dump_hw_state = skl_dump_hw_state,
1987 };
1988 
1989 static const struct dpll_info bxt_plls[] = {
1990 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
1991 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1992 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1993 	{ },
1994 };
1995 
1996 static const struct intel_dpll_mgr bxt_pll_mgr = {
1997 	.dpll_info = bxt_plls,
1998 	.get_dplls = bxt_get_dpll,
1999 	.put_dplls = intel_put_dpll,
2000 	.dump_hw_state = bxt_dump_hw_state,
2001 };
2002 
2003 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2004 			       struct intel_shared_dpll *pll)
2005 {
2006 	const enum intel_dpll_id id = pll->info->id;
2007 	u32 val;
2008 
2009 	/* 1. Enable DPLL power in DPLL_ENABLE. */
2010 	val = I915_READ(CNL_DPLL_ENABLE(id));
2011 	val |= PLL_POWER_ENABLE;
2012 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2013 
2014 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2015 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2016 				  PLL_POWER_STATE, 5))
2017 		DRM_ERROR("PLL %d Power not enabled\n", id);
2018 
2019 	/*
2020 	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2021 	 * select DP mode, and set DP link rate.
2022 	 */
2023 	val = pll->state.hw_state.cfgcr0;
2024 	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
2025 
2026 	/* 4. Read back to ensure the writes completed */
2027 	POSTING_READ(CNL_DPLL_CFGCR0(id));
2028 
2029 	/* 3. Configure DPLL_CFGCR1 to set the HDMI clock dividers. */
2030 	/* Avoid touching CFGCR1 if HDMI mode is not enabled. */
2031 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2032 		val = pll->state.hw_state.cfgcr1;
2033 		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
2034 		/* 4. Read back to ensure the writes completed */
2035 		POSTING_READ(CNL_DPLL_CFGCR1(id));
2036 	}
2037 
2038 	/*
2039 	 * 5. If the frequency will result in a change to the voltage
2040 	 * requirement, follow the Display Voltage Frequency Switching
2041 	 * Sequence Before Frequency Change
2042 	 *
2043 	 * Note: DVFS is actually handled via the cdclk code paths,
2044 	 * hence we do nothing here.
2045 	 */
2046 
2047 	/* 6. Enable DPLL in DPLL_ENABLE. */
2048 	val = I915_READ(CNL_DPLL_ENABLE(id));
2049 	val |= PLL_ENABLE;
2050 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2051 
2052 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
2053 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2054 		DRM_ERROR("PLL %d not locked\n", id);
2055 
2056 	/*
2057 	 * 8. If the frequency will result in a change to the voltage
2058 	 * requirement, follow the Display Voltage Frequency Switching
2059 	 * Sequence After Frequency Change
2060 	 *
2061 	 * Note: DVFS is actually handled via the cdclk code paths,
2062 	 * hence we do nothing here.
2063 	 */
2064 
2065 	/*
2066 	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2067 	 * Done at intel_ddi_clk_select
2068 	 */
2069 }
2070 
2071 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2072 				struct intel_shared_dpll *pll)
2073 {
2074 	const enum intel_dpll_id id = pll->info->id;
2075 	u32 val;
2076 
2077 	/*
2078 	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2079 	 * Done at intel_ddi_post_disable
2080 	 */
2081 
2082 	/*
2083 	 * 2. If the frequency will result in a change to the voltage
2084 	 * requirement, follow the Display Voltage Frequency Switching
2085 	 * Sequence Before Frequency Change
2086 	 *
2087 	 * Note: DVFS is actually handled via the cdclk code paths,
2088 	 * hence we do nothing here.
2089 	 */
2090 
2091 	/* 3. Disable DPLL through DPLL_ENABLE. */
2092 	val = I915_READ(CNL_DPLL_ENABLE(id));
2093 	val &= ~PLL_ENABLE;
2094 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2095 
2096 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2097 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2098 		DRM_ERROR("PLL %d still locked\n", id);
2099 
2100 	/*
2101 	 * 5. If the frequency will result in a change to the voltage
2102 	 * requirement, follow the Display Voltage Frequency Switching
2103 	 * Sequence After Frequency Change
2104 	 *
2105 	 * Note: DVFS is actually handled via the cdclk code paths,
2106 	 * hence we do nothing here.
2107 	 */
2108 
2109 	/* 6. Disable DPLL power in DPLL_ENABLE. */
2110 	val = I915_READ(CNL_DPLL_ENABLE(id));
2111 	val &= ~PLL_POWER_ENABLE;
2112 	I915_WRITE(CNL_DPLL_ENABLE(id), val);
2113 
2114 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2115 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2116 				    PLL_POWER_STATE, 5))
2117 		DRM_ERROR("PLL %d Power not disabled\n", id);
2118 }
2119 
2120 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2121 				     struct intel_shared_dpll *pll,
2122 				     struct intel_dpll_hw_state *hw_state)
2123 {
2124 	const enum intel_dpll_id id = pll->info->id;
2125 	intel_wakeref_t wakeref;
2126 	u32 val;
2127 	bool ret;
2128 
2129 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2130 						     POWER_DOMAIN_DISPLAY_CORE);
2131 	if (!wakeref)
2132 		return false;
2133 
2134 	ret = false;
2135 
2136 	val = I915_READ(CNL_DPLL_ENABLE(id));
2137 	if (!(val & PLL_ENABLE))
2138 		goto out;
2139 
2140 	val = I915_READ(CNL_DPLL_CFGCR0(id));
2141 	hw_state->cfgcr0 = val;
2142 
2143 	/* avoid reading back stale values if HDMI mode is not enabled */
2144 	if (val & DPLL_CFGCR0_HDMI_MODE)
2145 		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
2147 	ret = true;
2148 
2149 out:
2150 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2151 
2152 	return ret;
2153 }
2154 
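/*
 * Split the overall DCO divider into P, Q and K such that
 * pdiv * qdiv * kdiv == bestdiv, e.g. bestdiv 12 -> pdiv 2, qdiv 3, kdiv 2.
 * Only dividers from the table in cnl_ddi_calculate_wrpll() are expected.
 */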
2155 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2156 				      int *qdiv, int *kdiv)
2157 {
2158 	/* even dividers */
2159 	if (bestdiv % 2 == 0) {
2160 		if (bestdiv == 2) {
2161 			*pdiv = 2;
2162 			*qdiv = 1;
2163 			*kdiv = 1;
2164 		} else if (bestdiv % 4 == 0) {
2165 			*pdiv = 2;
2166 			*qdiv = bestdiv / 4;
2167 			*kdiv = 2;
2168 		} else if (bestdiv % 6 == 0) {
2169 			*pdiv = 3;
2170 			*qdiv = bestdiv / 6;
2171 			*kdiv = 2;
2172 		} else if (bestdiv % 5 == 0) {
2173 			*pdiv = 5;
2174 			*qdiv = bestdiv / 10;
2175 			*kdiv = 2;
2176 		} else if (bestdiv % 14 == 0) {
2177 			*pdiv = 7;
2178 			*qdiv = bestdiv / 14;
2179 			*kdiv = 2;
2180 		}
2181 	} else {
2182 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2183 			*pdiv = bestdiv;
2184 			*qdiv = 1;
2185 			*kdiv = 1;
2186 		} else { /* 9, 15, 21 */
2187 			*pdiv = bestdiv / 3;
2188 			*qdiv = 1;
2189 			*kdiv = 3;
2190 		}
2191 	}
2192 }
2193 
2194 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2195 				      u32 dco_freq, u32 ref_freq,
2196 				      int pdiv, int qdiv, int kdiv)
2197 {
2198 	u32 dco;
2199 
2200 	switch (kdiv) {
2201 	case 1:
2202 		params->kdiv = 1;
2203 		break;
2204 	case 2:
2205 		params->kdiv = 2;
2206 		break;
2207 	case 3:
2208 		params->kdiv = 4;
2209 		break;
2210 	default:
2211 		WARN(1, "Incorrect KDiv\n");
2212 	}
2213 
2214 	switch (pdiv) {
2215 	case 2:
2216 		params->pdiv = 1;
2217 		break;
2218 	case 3:
2219 		params->pdiv = 2;
2220 		break;
2221 	case 5:
2222 		params->pdiv = 4;
2223 		break;
2224 	case 7:
2225 		params->pdiv = 8;
2226 		break;
2227 	default:
2228 		WARN(1, "Incorrect PDiv\n");
2229 	}
2230 
2231 	WARN_ON(kdiv != 2 && qdiv != 1);
2232 
2233 	params->qdiv_ratio = qdiv;
2234 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2235 
2236 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2237 
2238 	params->dco_integer = dco >> 15;
2239 	params->dco_fraction = dco & 0x7fff;
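	/*
	 * E.g. dco_freq = 8100000 kHz with a 24000 kHz reference:
	 * 8100000 / 24000 = 337.5, so dco_integer = 337 (0x151) and
	 * dco_fraction = 0.5 * 2^15 = 0x4000.
	 */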
2240 }
2241 
2242 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2243 {
2244 	int ref_clock = dev_priv->cdclk.hw.ref;
2245 
2246 	/*
2247 	 * For ICL+, the spec states: if reference frequency is 38.4,
2248 	 * use 19.2 because the DPLL automatically divides that by 2.
2249 	 */
2250 	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2251 		ref_clock = 19200;
2252 
2253 	return ref_clock;
2254 }
2255 
2256 static bool
2257 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2258 			struct skl_wrpll_params *wrpll_params)
2259 {
2260 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2261 	u32 afe_clock = crtc_state->port_clock * 5;
2262 	u32 ref_clock;
2263 	u32 dco_min = 7998000;
2264 	u32 dco_max = 10000000;
2265 	u32 dco_mid = (dco_min + dco_max) / 2;
2266 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2267 					 18, 20, 24, 28, 30, 32,  36,  40,
2268 					 42, 44, 48, 50, 52, 54,  56,  60,
2269 					 64, 66, 68, 70, 72, 76,  78,  80,
2270 					 84, 88, 90, 92, 96, 98, 100, 102,
2271 					  3,  5,  7,  9, 15, 21 };
2272 	u32 dco, best_dco = 0, dco_centrality = 0;
2273 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2274 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2275 
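	/*
	 * Pick the divider whose DCO lands closest to the middle of the
	 * allowed range. E.g. for a 594000 kHz HDMI port clock the AFE clock
	 * is 2970000 kHz and only divider 3 yields a DCO inside
	 * [7998000, 10000000] (8910000 kHz), so best_div = 3.
	 */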
2276 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2277 		dco = afe_clock * dividers[d];
2278 
2279 		if ((dco <= dco_max) && (dco >= dco_min)) {
2280 			dco_centrality = abs(dco - dco_mid);
2281 
2282 			if (dco_centrality < best_dco_centrality) {
2283 				best_dco_centrality = dco_centrality;
2284 				best_div = dividers[d];
2285 				best_dco = dco;
2286 			}
2287 		}
2288 	}
2289 
2290 	if (best_div == 0)
2291 		return false;
2292 
2293 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2294 
2295 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2296 
2297 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2298 				  pdiv, qdiv, kdiv);
2299 
2300 	return true;
2301 }
2302 
2303 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2304 {
2305 	u32 cfgcr0, cfgcr1;
2306 	struct skl_wrpll_params wrpll_params = { 0, };
2307 
2308 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2309 
2310 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2311 		return false;
2312 
2313 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2314 		wrpll_params.dco_integer;
2315 
2316 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2317 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2318 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2319 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2320 		DPLL_CFGCR1_CENTRAL_FREQ;
2321 
2322 	memset(&crtc_state->dpll_hw_state, 0,
2323 	       sizeof(crtc_state->dpll_hw_state));
2324 
2325 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2326 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2327 	return true;
2328 }
2329 
2330 static bool
2331 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2332 {
2333 	u32 cfgcr0;
2334 
2335 	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2336 
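	/*
	 * The cases below are keyed on half the port (symbol) clock, matching
	 * the link rate field names: e.g. HBR2 (port_clock 540000) hits the
	 * 270000 case and programs DPLL_CFGCR0_LINK_RATE_2700.
	 */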
2337 	switch (crtc_state->port_clock / 2) {
2338 	case 81000:
2339 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2340 		break;
2341 	case 135000:
2342 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2343 		break;
2344 	case 270000:
2345 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2346 		break;
2347 		/* eDP 1.4 rates */
2348 	case 162000:
2349 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2350 		break;
2351 	case 108000:
2352 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2353 		break;
2354 	case 216000:
2355 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2356 		break;
2357 	case 324000:
2358 		/* Some SKUs may require elevated I/O voltage to support this */
2359 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2360 		break;
2361 	case 405000:
2362 		/* Some SKUs may require elevated I/O voltage to support this */
2363 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2364 		break;
2365 	}
2366 
2367 	memset(&crtc_state->dpll_hw_state, 0,
2368 	       sizeof(crtc_state->dpll_hw_state));
2369 
2370 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2371 
2372 	return true;
2373 }
2374 
2375 static bool cnl_get_dpll(struct intel_atomic_state *state,
2376 			 struct intel_crtc *crtc,
2377 			 struct intel_encoder *encoder)
2378 {
2379 	struct intel_crtc_state *crtc_state =
2380 		intel_atomic_get_new_crtc_state(state, crtc);
2381 	struct intel_shared_dpll *pll;
2382 	bool bret;
2383 
2384 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2385 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2386 		if (!bret) {
2387 			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2388 			return false;
2389 		}
2390 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
2391 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2392 		if (!bret) {
2393 			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2394 			return false;
2395 		}
2396 	} else {
2397 		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2398 			      crtc_state->output_types);
2399 		return false;
2400 	}
2401 
2402 	pll = intel_find_shared_dpll(state, crtc,
2403 				     &crtc_state->dpll_hw_state,
2404 				     DPLL_ID_SKL_DPLL0,
2405 				     DPLL_ID_SKL_DPLL2);
2406 	if (!pll) {
2407 		DRM_DEBUG_KMS("No PLL selected\n");
2408 		return false;
2409 	}
2410 
2411 	intel_reference_shared_dpll(state, crtc,
2412 				    pll, &crtc_state->dpll_hw_state);
2413 
2414 	crtc_state->shared_dpll = pll;
2415 
2416 	return true;
2417 }
2418 
2419 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2420 			      const struct intel_dpll_hw_state *hw_state)
2421 {
2422 	DRM_DEBUG_KMS("dpll_hw_state: "
2423 		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2424 		      hw_state->cfgcr0,
2425 		      hw_state->cfgcr1);
2426 }
2427 
2428 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2429 	.enable = cnl_ddi_pll_enable,
2430 	.disable = cnl_ddi_pll_disable,
2431 	.get_hw_state = cnl_ddi_pll_get_hw_state,
2432 };
2433 
2434 static const struct dpll_info cnl_plls[] = {
2435 	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2436 	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2437 	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2438 	{ },
2439 };
2440 
2441 static const struct intel_dpll_mgr cnl_pll_mgr = {
2442 	.dpll_info = cnl_plls,
2443 	.get_dplls = cnl_get_dpll,
2444 	.put_dplls = intel_put_dpll,
2445 	.dump_hw_state = cnl_dump_hw_state,
2446 };
2447 
2448 struct icl_combo_pll_params {
2449 	int clock;
2450 	struct skl_wrpll_params wrpll;
2451 };
2452 
2453 /*
2454  * These values are already adjusted: they're the bits we write to the
2455  * registers, not the logical values.
2456  */
2457 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2458 	{ 540000,
2459 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2460 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2461 	{ 270000,
2462 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2463 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2464 	{ 162000,
2465 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2466 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2467 	{ 324000,
2468 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2469 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2470 	{ 216000,
2471 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2472 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2473 	{ 432000,
2474 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2475 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2476 	{ 648000,
2477 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2478 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2479 	{ 810000,
2480 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2481 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2482 };
2483 
2484 
2485 /* Also used for 38.4 MHz values. */
2486 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2487 	{ 540000,
2488 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2489 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2490 	{ 270000,
2491 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2492 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2493 	{ 162000,
2494 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2495 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2496 	{ 324000,
2497 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2498 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2499 	{ 216000,
2500 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2501 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2502 	{ 432000,
2503 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2504 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2505 	{ 648000,
2506 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2507 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2508 	{ 810000,
2509 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2510 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2511 };
2512 
2513 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2514 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2515 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2516 };
2517 
2518 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2519 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2520 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2521 };
2522 
2523 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2524 				  struct skl_wrpll_params *pll_params)
2525 {
2526 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2527 	const struct icl_combo_pll_params *params =
2528 		dev_priv->cdclk.hw.ref == 24000 ?
2529 		icl_dp_combo_pll_24MHz_values :
2530 		icl_dp_combo_pll_19_2MHz_values;
2531 	int clock = crtc_state->port_clock;
2532 	int i;
2533 
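	/*
	 * Both rate tables have the same number of entries, so using the
	 * ARRAY_SIZE() of the 24 MHz table as the loop bound is safe here.
	 */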
2534 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2535 		if (clock == params[i].clock) {
2536 			*pll_params = params[i].wrpll;
2537 			return true;
2538 		}
2539 	}
2540 
2541 	MISSING_CASE(clock);
2542 	return false;
2543 }
2544 
2545 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2546 			     struct skl_wrpll_params *pll_params)
2547 {
2548 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2549 
2550 	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2551 			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
2552 	return true;
2553 }
2554 
2555 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2556 				struct intel_encoder *encoder,
2557 				struct intel_dpll_hw_state *pll_state)
2558 {
2559 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2560 	u32 cfgcr0, cfgcr1;
2561 	struct skl_wrpll_params pll_params = { 0 };
2562 	bool ret;
2563 
2564 	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2565 							encoder->port)))
2566 		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2567 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2568 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2569 		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2570 	else
2571 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2572 
2573 	if (!ret)
2574 		return false;
2575 
2576 	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2577 		 pll_params.dco_integer;
2578 
2579 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2580 		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2581 		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2582 		 DPLL_CFGCR1_PDIV(pll_params.pdiv);
2583 
2584 	if (INTEL_GEN(dev_priv) >= 12)
2585 		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2586 	else
2587 		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2588 
2589 	memset(pll_state, 0, sizeof(*pll_state));
2590 
2591 	pll_state->cfgcr0 = cfgcr0;
2592 	pll_state->cfgcr1 = cfgcr1;
2593 
2594 	return true;
2595 }
2596 
2597 
2598 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2599 {
2600 	return id - DPLL_ID_ICL_MGPLL1;
2601 }
2602 
2603 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2604 {
2605 	return tc_port + DPLL_ID_ICL_MGPLL1;
2606 }
2607 
2608 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2609 				     u32 *target_dco_khz,
2610 				     struct intel_dpll_hw_state *state)
2611 {
2612 	u32 dco_min_freq, dco_max_freq;
2613 	int div1_vals[] = {7, 5, 3, 2};
2614 	unsigned int i;
2615 	int div2;
2616 
2617 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2618 	dco_max_freq = is_dp ? 8100000 : 10000000;
2619 
2620 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2621 		int div1 = div1_vals[i];
2622 
2623 		for (div2 = 10; div2 > 0; div2--) {
2624 			int dco = div1 * div2 * clock_khz * 5;
2625 			int a_divratio, tlinedrv, inputsel;
2626 			u32 hsdiv;
2627 
2628 			if (dco < dco_min_freq || dco > dco_max_freq)
2629 				continue;
2630 
2631 			if (div2 >= 2) {
2632 				a_divratio = is_dp ? 10 : 5;
2633 				tlinedrv = 2;
2634 			} else {
2635 				a_divratio = 5;
2636 				tlinedrv = 0;
2637 			}
2638 			inputsel = is_dp ? 0 : 1;
2639 
2640 			switch (div1) {
2641 			default:
2642 				MISSING_CASE(div1);
2643 				/* fall through */
2644 			case 2:
2645 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2646 				break;
2647 			case 3:
2648 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2649 				break;
2650 			case 5:
2651 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2652 				break;
2653 			case 7:
2654 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2655 				break;
2656 			}
2657 
2658 			*target_dco_khz = dco;
2659 
2660 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2661 
2662 			state->mg_clktop2_coreclkctl1 =
2663 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2664 
2665 			state->mg_clktop2_hsclkctl =
2666 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2667 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2668 				hsdiv |
2669 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2670 
2671 			return true;
2672 		}
2673 	}
2674 
2675 	return false;
2676 }
2677 
2678 /*
2679  * The specification for this function uses real numbers, so the math had to be
2680  * adapted to integer-only calculation, that's why it looks so different.
2681  */
2682 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2683 				  struct intel_dpll_hw_state *pll_state)
2684 {
2685 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2686 	int refclk_khz = dev_priv->cdclk.hw.ref;
2687 	int clock = crtc_state->port_clock;
2688 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2689 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2690 	u32 prop_coeff, int_coeff;
2691 	u32 tdc_targetcnt, feedfwgain;
2692 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2693 	u64 tmp;
2694 	bool use_ssc = false;
2695 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2696 
2697 	memset(pll_state, 0, sizeof(*pll_state));
2698 
2699 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2700 				      pll_state)) {
2701 		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
2702 		return false;
2703 	}
2704 
2705 	m1div = 2;
2706 	m2div_int = dco_khz / (refclk_khz * m1div);
2707 	if (m2div_int > 255) {
2708 		m1div = 4;
2709 		m2div_int = dco_khz / (refclk_khz * m1div);
2710 		if (m2div_int > 255) {
2711 			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2712 				      clock);
2713 			return false;
2714 		}
2715 	}
2716 	m2div_rem = dco_khz % (refclk_khz * m1div);
2717 
2718 	tmp = (u64)m2div_rem * (1 << 22);
2719 	do_div(tmp, refclk_khz * m1div);
2720 	m2div_frac = tmp;
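	/*
	 * E.g. DP HBR2 (clock = 540000) forces dco_khz = 8100000; with a
	 * 24000 kHz reference and m1div = 2 this gives m2div_int = 168,
	 * m2div_rem = 36000 and m2div_frac = 0.75 * 2^22 = 0x300000.
	 */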
2721 
2722 	switch (refclk_khz) {
2723 	case 19200:
2724 		iref_ndiv = 1;
2725 		iref_trim = 28;
2726 		iref_pulse_w = 1;
2727 		break;
2728 	case 24000:
2729 		iref_ndiv = 1;
2730 		iref_trim = 25;
2731 		iref_pulse_w = 2;
2732 		break;
2733 	case 38400:
2734 		iref_ndiv = 2;
2735 		iref_trim = 28;
2736 		iref_pulse_w = 1;
2737 		break;
2738 	default:
2739 		MISSING_CASE(refclk_khz);
2740 		return false;
2741 	}
2742 
2743 	/*
2744 	 * tdc_res = 0.000003
2745 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2746 	 *
2747 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2748 	 * was supposed to be a division, but we rearranged the operations of
2749 	 * the formula to avoid early divisions so we don't multiply the
2750 	 * rounding errors.
2751 	 *
2752 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2753 	 * we also rearrange to work with integers.
2754 	 *
2755 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2756 	 * last division by 10.
2757 	 */
2758 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
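	/*
	 * E.g. refclk_khz = 24000: 2 * 1000 * 100000 * 10 / (132 * 24000) =
	 * 631, so tdc_targetcnt = (631 + 5) / 10 = 63.
	 */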
2759 
2760 	/*
2761 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2762 	 * 32 bits. That's not a problem since we round the division down
2763 	 * anyway.
2764 	 */
2765 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2766 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
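	/*
	 * With the HBR2 example above (dco_khz = 8100000, m1div = 2,
	 * m2div_rem = 36000) this yields feedfwgain = 200000000 / 2430000 = 82.
	 */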
2767 
2768 	if (dco_khz >= 9000000) {
2769 		prop_coeff = 5;
2770 		int_coeff = 10;
2771 	} else {
2772 		prop_coeff = 4;
2773 		int_coeff = 8;
2774 	}
2775 
2776 	if (use_ssc) {
2777 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2778 		do_div(tmp, refclk_khz * m1div * 10000);
2779 		ssc_stepsize = tmp;
2780 
2781 		tmp = mul_u32_u32(dco_khz, 1000);
2782 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2783 	} else {
2784 		ssc_stepsize = 0;
2785 		ssc_steplen = 0;
2786 	}
2787 	ssc_steplog = 4;
2788 
2789 	pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2790 				  MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2791 				  MG_PLL_DIV0_FBDIV_INT(m2div_int);
2792 
2793 	pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2794 				 MG_PLL_DIV1_DITHER_DIV_2 |
2795 				 MG_PLL_DIV1_NDIVRATIO(1) |
2796 				 MG_PLL_DIV1_FBPREDIV(m1div);
2797 
2798 	pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2799 			       MG_PLL_LF_AFCCNTSEL_512 |
2800 			       MG_PLL_LF_GAINCTRL(1) |
2801 			       MG_PLL_LF_INT_COEFF(int_coeff) |
2802 			       MG_PLL_LF_PROP_COEFF(prop_coeff);
2803 
2804 	pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2805 				      MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2806 				      MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2807 				      MG_PLL_FRAC_LOCK_DCODITHEREN |
2808 				      MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2809 	if (use_ssc || m2div_rem > 0)
2810 		pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2811 
2812 	pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2813 				MG_PLL_SSC_TYPE(2) |
2814 				MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2815 				MG_PLL_SSC_STEPNUM(ssc_steplog) |
2816 				MG_PLL_SSC_FLLEN |
2817 				MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2818 
2819 	pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2820 					    MG_PLL_TDC_COLDST_IREFINT_EN |
2821 					    MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2822 					    MG_PLL_TDC_TDCOVCCORR_EN |
2823 					    MG_PLL_TDC_TDCSEL(3);
2824 
2825 	pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2826 				 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2827 				 MG_PLL_BIAS_BIAS_BONUS(10) |
2828 				 MG_PLL_BIAS_BIASCAL_EN |
2829 				 MG_PLL_BIAS_CTRIM(12) |
2830 				 MG_PLL_BIAS_VREF_RDAC(4) |
2831 				 MG_PLL_BIAS_IREFTRIM(iref_trim);
2832 
2833 	if (refclk_khz == 38400) {
2834 		pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2835 		pll_state->mg_pll_bias_mask = 0;
2836 	} else {
2837 		pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2838 		pll_state->mg_pll_bias_mask = -1U;
2839 	}
2840 
2841 	pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2842 	pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2843 
2844 	return true;
2845 }
2846 
2847 /**
2848  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2849  * @crtc_state: state for the CRTC to select the DPLL for
2850  * @port_dpll_id: the port DPLL instance to select as active
2851  *
2852  * Select the given @port_dpll_id instance from the DPLLs reserved for the
2853  * CRTC.
2854  */
2855 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2856 			      enum icl_port_dpll_id port_dpll_id)
2857 {
2858 	struct icl_port_dpll *port_dpll =
2859 		&crtc_state->icl_port_dplls[port_dpll_id];
2860 
2861 	crtc_state->shared_dpll = port_dpll->pll;
2862 	crtc_state->dpll_hw_state = port_dpll->hw_state;
2863 }
2864 
2865 static void icl_update_active_dpll(struct intel_atomic_state *state,
2866 				   struct intel_crtc *crtc,
2867 				   struct intel_encoder *encoder)
2868 {
2869 	struct intel_crtc_state *crtc_state =
2870 		intel_atomic_get_new_crtc_state(state, crtc);
2871 	struct intel_digital_port *primary_port;
2872 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2873 
2874 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
2875 		enc_to_mst(&encoder->base)->primary :
2876 		enc_to_dig_port(&encoder->base);
2877 
2878 	if (primary_port &&
2879 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
2880 	     primary_port->tc_mode == TC_PORT_LEGACY))
2881 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
2882 
2883 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
2884 }
2885 
2886 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
2887 				   struct intel_crtc *crtc,
2888 				   struct intel_encoder *encoder)
2889 {
2890 	struct intel_crtc_state *crtc_state =
2891 		intel_atomic_get_new_crtc_state(state, crtc);
2892 	struct icl_port_dpll *port_dpll =
2893 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2894 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2895 	enum port port = encoder->port;
2896 	bool has_dpll4 = false;
2897 
2898 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2899 		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
2900 
2901 		return false;
2902 	}
2903 
2904 	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
2905 		has_dpll4 = true;
2906 
2907 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2908 						&port_dpll->hw_state,
2909 						DPLL_ID_ICL_DPLL0,
2910 						has_dpll4 ? DPLL_ID_EHL_DPLL4
2911 							  : DPLL_ID_ICL_DPLL1);
2912 	if (!port_dpll->pll) {
2913 		DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
2914 			      port_name(encoder->port));
2915 		return false;
2916 	}
2917 
2918 	intel_reference_shared_dpll(state, crtc,
2919 				    port_dpll->pll, &port_dpll->hw_state);
2920 
2921 	icl_update_active_dpll(state, crtc, encoder);
2922 
2923 	return true;
2924 }
2925 
2926 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
2927 				 struct intel_crtc *crtc,
2928 				 struct intel_encoder *encoder)
2929 {
2930 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2931 	struct intel_crtc_state *crtc_state =
2932 		intel_atomic_get_new_crtc_state(state, crtc);
2933 	struct icl_port_dpll *port_dpll;
2934 	enum intel_dpll_id dpll_id;
2935 
2936 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2937 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
2938 		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
2939 		return false;
2940 	}
2941 
2942 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2943 						&port_dpll->hw_state,
2944 						DPLL_ID_ICL_TBTPLL,
2945 						DPLL_ID_ICL_TBTPLL);
2946 	if (!port_dpll->pll) {
2947 		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
2948 		return false;
2949 	}
2950 	intel_reference_shared_dpll(state, crtc,
2951 				    port_dpll->pll, &port_dpll->hw_state);
2952 
2953 
2954 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
2955 	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
2956 		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
2957 		goto err_unreference_tbt_pll;
2958 	}
2959 
2960 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
2961 							 encoder->port));
2962 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
2963 						&port_dpll->hw_state,
2964 						dpll_id,
2965 						dpll_id);
2966 	if (!port_dpll->pll) {
2967 		DRM_DEBUG_KMS("No MG PHY PLL found\n");
2968 		goto err_unreference_tbt_pll;
2969 	}
2970 	intel_reference_shared_dpll(state, crtc,
2971 				    port_dpll->pll, &port_dpll->hw_state);
2972 
2973 	icl_update_active_dpll(state, crtc, encoder);
2974 
2975 	return true;
2976 
2977 err_unreference_tbt_pll:
2978 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
2979 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
2980 
2981 	return false;
2982 }
2983 
2984 static bool icl_get_dplls(struct intel_atomic_state *state,
2985 			  struct intel_crtc *crtc,
2986 			  struct intel_encoder *encoder)
2987 {
2988 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2989 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
2990 
2991 	if (intel_phy_is_combo(dev_priv, phy))
2992 		return icl_get_combo_phy_dpll(state, crtc, encoder);
2993 	else if (intel_phy_is_tc(dev_priv, phy))
2994 		return icl_get_tc_phy_dplls(state, crtc, encoder);
2995 
2996 	MISSING_CASE(phy);
2997 
2998 	return false;
2999 }
3000 
3001 static void icl_put_dplls(struct intel_atomic_state *state,
3002 			  struct intel_crtc *crtc)
3003 {
3004 	const struct intel_crtc_state *old_crtc_state =
3005 		intel_atomic_get_old_crtc_state(state, crtc);
3006 	struct intel_crtc_state *new_crtc_state =
3007 		intel_atomic_get_new_crtc_state(state, crtc);
3008 	enum icl_port_dpll_id id;
3009 
3010 	new_crtc_state->shared_dpll = NULL;
3011 
3012 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3013 		const struct icl_port_dpll *old_port_dpll =
3014 			&old_crtc_state->icl_port_dplls[id];
3015 		struct icl_port_dpll *new_port_dpll =
3016 			&new_crtc_state->icl_port_dplls[id];
3017 
3018 		new_port_dpll->pll = NULL;
3019 
3020 		if (!old_port_dpll->pll)
3021 			continue;
3022 
3023 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3024 	}
3025 }
3026 
3027 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3028 				struct intel_shared_dpll *pll,
3029 				struct intel_dpll_hw_state *hw_state)
3030 {
3031 	const enum intel_dpll_id id = pll->info->id;
3032 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3033 	intel_wakeref_t wakeref;
3034 	bool ret = false;
3035 	u32 val;
3036 
3037 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3038 						     POWER_DOMAIN_DISPLAY_CORE);
3039 	if (!wakeref)
3040 		return false;
3041 
3042 	val = I915_READ(MG_PLL_ENABLE(tc_port));
3043 	if (!(val & PLL_ENABLE))
3044 		goto out;
3045 
3046 	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
3047 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3048 
3049 	hw_state->mg_clktop2_coreclkctl1 =
3050 		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3051 	hw_state->mg_clktop2_coreclkctl1 &=
3052 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3053 
3054 	hw_state->mg_clktop2_hsclkctl =
3055 		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3056 	hw_state->mg_clktop2_hsclkctl &=
3057 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3058 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3059 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3060 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3061 
3062 	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
3063 	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
3064 	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
3065 	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
3066 	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
3067 
3068 	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
3069 	hw_state->mg_pll_tdc_coldst_bias =
3070 		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3071 
3072 	if (dev_priv->cdclk.hw.ref == 38400) {
3073 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3074 		hw_state->mg_pll_bias_mask = 0;
3075 	} else {
3076 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3077 		hw_state->mg_pll_bias_mask = -1U;
3078 	}
3079 
3080 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3081 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3082 
3083 	ret = true;
3084 out:
3085 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3086 	return ret;
3087 }
3088 
3089 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3090 				 struct intel_shared_dpll *pll,
3091 				 struct intel_dpll_hw_state *hw_state,
3092 				 i915_reg_t enable_reg)
3093 {
3094 	const enum intel_dpll_id id = pll->info->id;
3095 	intel_wakeref_t wakeref;
3096 	bool ret = false;
3097 	u32 val;
3098 
3099 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3100 						     POWER_DOMAIN_DISPLAY_CORE);
3101 	if (!wakeref)
3102 		return false;
3103 
3104 	val = I915_READ(enable_reg);
3105 	if (!(val & PLL_ENABLE))
3106 		goto out;
3107 
3108 	if (INTEL_GEN(dev_priv) >= 12) {
3109 		hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
3110 		hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
3111 	} else {
3112 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3113 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
3114 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
3115 		} else {
3116 			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3117 			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3118 		}
3119 	}
3120 
3121 	ret = true;
3122 out:
3123 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3124 	return ret;
3125 }
3126 
3127 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3128 				   struct intel_shared_dpll *pll,
3129 				   struct intel_dpll_hw_state *hw_state)
3130 {
3131 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3132 
3133 	if (IS_ELKHARTLAKE(dev_priv) &&
3134 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3135 		enable_reg = MG_PLL_ENABLE(0);
3136 	}
3137 
3138 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3139 }
3140 
3141 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3142 				 struct intel_shared_dpll *pll,
3143 				 struct intel_dpll_hw_state *hw_state)
3144 {
3145 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3146 }
3147 
3148 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3149 			   struct intel_shared_dpll *pll)
3150 {
3151 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3152 	const enum intel_dpll_id id = pll->info->id;
3153 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3154 
3155 	if (INTEL_GEN(dev_priv) >= 12) {
3156 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3157 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3158 	} else {
3159 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3160 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3161 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3162 		} else {
3163 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3164 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3165 		}
3166 	}
3167 
3168 	I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
3169 	I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
3170 	POSTING_READ(cfgcr1_reg);
3171 }
3172 
3173 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3174 			     struct intel_shared_dpll *pll)
3175 {
3176 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3177 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3178 	u32 val;
3179 
3180 	/*
3181 	 * Some of the following registers have reserved fields, so program
3182 	 * these with RMW based on a mask. The mask can be fixed or generated
3183 	 * during the calc/readout phase if the mask depends on some other HW
3184 	 * state like refclk, see icl_calc_mg_pll_state().
3185 	 */
3186 	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3187 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3188 	val |= hw_state->mg_refclkin_ctl;
3189 	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3190 
3191 	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3192 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3193 	val |= hw_state->mg_clktop2_coreclkctl1;
3194 	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3195 
3196 	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3197 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3198 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3199 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3200 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3201 	val |= hw_state->mg_clktop2_hsclkctl;
3202 	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
3203 
3204 	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3205 	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3206 	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3207 	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3208 	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3209 
3210 	val = I915_READ(MG_PLL_BIAS(tc_port));
3211 	val &= ~hw_state->mg_pll_bias_mask;
3212 	val |= hw_state->mg_pll_bias;
3213 	I915_WRITE(MG_PLL_BIAS(tc_port), val);
3214 
3215 	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3216 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3217 	val |= hw_state->mg_pll_tdc_coldst_bias;
3218 	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3219 
3220 	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3221 }
3222 
3223 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3224 				 struct intel_shared_dpll *pll,
3225 				 i915_reg_t enable_reg)
3226 {
3227 	u32 val;
3228 
3229 	val = I915_READ(enable_reg);
3230 	val |= PLL_POWER_ENABLE;
3231 	I915_WRITE(enable_reg, val);
3232 
3233 	/*
3234 	 * The spec says we need to "wait" but it also says it should be
3235 	 * immediate.
3236 	 */
3237 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3238 		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3239 }
3240 
3241 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3242 			   struct intel_shared_dpll *pll,
3243 			   i915_reg_t enable_reg)
3244 {
3245 	u32 val;
3246 
3247 	val = I915_READ(enable_reg);
3248 	val |= PLL_ENABLE;
3249 	I915_WRITE(enable_reg, val);
3250 
3251 	/* Timeout is actually 600us. */
3252 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3253 		DRM_ERROR("PLL %d not locked\n", pll->info->id);
3254 }
3255 
3256 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3257 			     struct intel_shared_dpll *pll)
3258 {
3259 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3260 
3261 	if (IS_ELKHARTLAKE(dev_priv) &&
3262 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3263 		enable_reg = MG_PLL_ENABLE(0);
3264 
3265 		/*
3266 		 * We need to disable DC states when this DPLL is enabled.
3267 		 * This can be done by taking a reference on DPLL4 power
3268 		 * domain.
3269 		 */
3270 		pll->wakeref = intel_display_power_get(dev_priv,
3271 						       POWER_DOMAIN_DPLL_DC_OFF);
3272 	}
3273 
3274 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3275 
3276 	icl_dpll_write(dev_priv, pll);
3277 
3278 	/*
3279 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3280 	 * paths should already be setting the appropriate voltage, hence we do
3281 	 * nothing here.
3282 	 */
3283 
3284 	icl_pll_enable(dev_priv, pll, enable_reg);
3285 
3286 	/* DVFS post sequence would be here. See the comment above. */
3287 }
3288 
3289 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3290 			   struct intel_shared_dpll *pll)
3291 {
3292 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3293 
3294 	icl_dpll_write(dev_priv, pll);
3295 
3296 	/*
3297 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3298 	 * paths should already be setting the appropriate voltage, hence we do
3299 	 * nothing here.
3300 	 */
3301 
3302 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3303 
3304 	/* DVFS post sequence would be here. See the comment above. */
3305 }
3306 
3307 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3308 			  struct intel_shared_dpll *pll)
3309 {
3310 	i915_reg_t enable_reg =
3311 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3312 
3313 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3314 
3315 	icl_mg_pll_write(dev_priv, pll);
3316 
3317 	/*
3318 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3319 	 * paths should already be setting the appropriate voltage, hence we do
3320 	 * nothing here.
3321 	 */
3322 
3323 	icl_pll_enable(dev_priv, pll, enable_reg);
3324 
3325 	/* DVFS post sequence would be here. See the comment above. */
3326 }
3327 
3328 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3329 			    struct intel_shared_dpll *pll,
3330 			    i915_reg_t enable_reg)
3331 {
3332 	u32 val;
3333 
3334 	/* The first steps are done by intel_ddi_post_disable(). */
3335 
3336 	/*
3337 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3338 	 * paths should already be setting the appropriate voltage, hence we do
3339 	 * nothing here.
3340 	 */
3341 
3342 	val = I915_READ(enable_reg);
3343 	val &= ~PLL_ENABLE;
3344 	I915_WRITE(enable_reg, val);
3345 
3346 	/* Timeout is actually 1us. */
3347 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3348 		DRM_ERROR("PLL %d still locked\n", pll->info->id);
3349 
3350 	/* DVFS post sequence would be here. See the comment above. */
3351 
3352 	val = I915_READ(enable_reg);
3353 	val &= ~PLL_POWER_ENABLE;
3354 	I915_WRITE(enable_reg, val);
3355 
3356 	/*
3357 	 * The spec says we need to "wait" but it also says it should be
3358 	 * immediate.
3359 	 */
3360 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3361 		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3362 }
3363 
3364 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3365 			      struct intel_shared_dpll *pll)
3366 {
3367 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3368 
3369 	if (IS_ELKHARTLAKE(dev_priv) &&
3370 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3371 		enable_reg = MG_PLL_ENABLE(0);
3372 		icl_pll_disable(dev_priv, pll, enable_reg);
3373 
3374 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3375 					pll->wakeref);
3376 		return;
3377 	}
3378 
3379 	icl_pll_disable(dev_priv, pll, enable_reg);
3380 }
3381 
3382 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3383 			    struct intel_shared_dpll *pll)
3384 {
3385 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3386 }
3387 
3388 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3389 			   struct intel_shared_dpll *pll)
3390 {
3391 	i915_reg_t enable_reg =
3392 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3393 
3394 	icl_pll_disable(dev_priv, pll, enable_reg);
3395 }
3396 
3397 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3398 			      const struct intel_dpll_hw_state *hw_state)
3399 {
3400 	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3401 		      "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3402 		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3403 		      "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3404 		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3405 		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3406 		      hw_state->cfgcr0, hw_state->cfgcr1,
3407 		      hw_state->mg_refclkin_ctl,
3408 		      hw_state->mg_clktop2_coreclkctl1,
3409 		      hw_state->mg_clktop2_hsclkctl,
3410 		      hw_state->mg_pll_div0,
3411 		      hw_state->mg_pll_div1,
3412 		      hw_state->mg_pll_lf,
3413 		      hw_state->mg_pll_frac_lock,
3414 		      hw_state->mg_pll_ssc,
3415 		      hw_state->mg_pll_bias,
3416 		      hw_state->mg_pll_tdc_coldst_bias);
3417 }
3418 
3419 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3420 	.enable = combo_pll_enable,
3421 	.disable = combo_pll_disable,
3422 	.get_hw_state = combo_pll_get_hw_state,
3423 };
3424 
3425 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3426 	.enable = tbt_pll_enable,
3427 	.disable = tbt_pll_disable,
3428 	.get_hw_state = tbt_pll_get_hw_state,
3429 };
3430 
3431 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3432 	.enable = mg_pll_enable,
3433 	.disable = mg_pll_disable,
3434 	.get_hw_state = mg_pll_get_hw_state,
3435 };
3436 
3437 static const struct dpll_info icl_plls[] = {
3438 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3439 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3440 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3441 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3442 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3443 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3444 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3445 	{ },
3446 };
3447 
3448 static const struct intel_dpll_mgr icl_pll_mgr = {
3449 	.dpll_info = icl_plls,
3450 	.get_dplls = icl_get_dplls,
3451 	.put_dplls = icl_put_dplls,
3452 	.update_active_dpll = icl_update_active_dpll,
3453 	.dump_hw_state = icl_dump_hw_state,
3454 };
3455 
3456 static const struct dpll_info ehl_plls[] = {
3457 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3458 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3459 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
3460 	{ },
3461 };
3462 
3463 static const struct intel_dpll_mgr ehl_pll_mgr = {
3464 	.dpll_info = ehl_plls,
3465 	.get_dplls = icl_get_dplls,
3466 	.put_dplls = icl_put_dplls,
3467 	.dump_hw_state = icl_dump_hw_state,
3468 };
3469 
3470 static const struct dpll_info tgl_plls[] = {
3471 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3472 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3473 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3474 	/* TODO: Add typeC plls */
3475 	{ },
3476 };
3477 
3478 static const struct intel_dpll_mgr tgl_pll_mgr = {
3479 	.dpll_info = tgl_plls,
3480 	.get_dplls = icl_get_dplls,
3481 	.put_dplls = icl_put_dplls,
3482 	.dump_hw_state = icl_dump_hw_state,
3483 };
3484 
3485 /**
3486  * intel_shared_dpll_init - Initialize shared DPLLs
3487  * @dev: drm device
3488  *
3489  * Initialize shared DPLLs for @dev.
3490  */
3491 void intel_shared_dpll_init(struct drm_device *dev)
3492 {
3493 	struct drm_i915_private *dev_priv = to_i915(dev);
3494 	const struct intel_dpll_mgr *dpll_mgr = NULL;
3495 	const struct dpll_info *dpll_info;
3496 	int i;
3497 
3498 	if (INTEL_GEN(dev_priv) >= 12)
3499 		dpll_mgr = &tgl_pll_mgr;
3500 	else if (IS_ELKHARTLAKE(dev_priv))
3501 		dpll_mgr = &ehl_pll_mgr;
3502 	else if (INTEL_GEN(dev_priv) >= 11)
3503 		dpll_mgr = &icl_pll_mgr;
3504 	else if (IS_CANNONLAKE(dev_priv))
3505 		dpll_mgr = &cnl_pll_mgr;
3506 	else if (IS_GEN9_BC(dev_priv))
3507 		dpll_mgr = &skl_pll_mgr;
3508 	else if (IS_GEN9_LP(dev_priv))
3509 		dpll_mgr = &bxt_pll_mgr;
3510 	else if (HAS_DDI(dev_priv))
3511 		dpll_mgr = &hsw_pll_mgr;
3512 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3513 		dpll_mgr = &pch_pll_mgr;
3514 
3515 	if (!dpll_mgr) {
3516 		dev_priv->num_shared_dpll = 0;
3517 		return;
3518 	}
3519 
3520 	dpll_info = dpll_mgr->dpll_info;
3521 
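	/*
	 * The table index is expected to match the PLL id (see the WARN_ON
	 * below), so that shared_dplls[] can be indexed directly by id,
	 * e.g. by intel_get_shared_dpll_by_id().
	 */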
3522 	for (i = 0; dpll_info[i].name; i++) {
3523 		WARN_ON(i != dpll_info[i].id);
3524 		dev_priv->shared_dplls[i].info = &dpll_info[i];
3525 	}
3526 
3527 	dev_priv->dpll_mgr = dpll_mgr;
3528 	dev_priv->num_shared_dpll = i;
3529 	mutex_init(&dev_priv->dpll_lock);
3530 
3531 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3532 }
3533 
3534 /**
3535  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3536  * @state: atomic state
3537  * @crtc: CRTC to reserve DPLLs for
3538  * @encoder: encoder
3539  *
3540  * This function reserves all required DPLLs for the given CRTC and encoder
3541  * combination in the current atomic commit @state and the new @crtc atomic
3542  * state.
3543  *
3544  * The new configuration in the atomic commit @state is made effective by
3545  * calling intel_shared_dpll_swap_state().
3546  *
3547  * The reserved DPLLs should be released by calling
3548  * intel_release_shared_dplls().
3549  *
3550  * Returns:
3551  * True if all required DPLLs were successfully reserved.
3552  */
3553 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3554 				struct intel_crtc *crtc,
3555 				struct intel_encoder *encoder)
3556 {
3557 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3558 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3559 
3560 	if (WARN_ON(!dpll_mgr))
3561 		return false;
3562 
3563 	return dpll_mgr->get_dplls(state, crtc, encoder);
3564 }
3565 
3566 /**
3567  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3568  * @state: atomic state
3569  * @crtc: crtc from which the DPLLs are to be released
3570  *
3571  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3572  * from the current atomic commit @state and the old @crtc atomic state.
3573  *
3574  * The new configuration in the atomic commit @state is made effective by
3575  * calling intel_shared_dpll_swap_state().
3576  */
3577 void intel_release_shared_dplls(struct intel_atomic_state *state,
3578 				struct intel_crtc *crtc)
3579 {
3580 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3581 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3582 
3583 	/*
3584 	 * FIXME: this function is called for every platform having a
3585 	 * compute_clock hook, even though the platform doesn't yet support
3586 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
3587 	 * called on those.
3588 	 */
3589 	if (!dpll_mgr)
3590 		return;
3591 
3592 	dpll_mgr->put_dplls(state, crtc);
3593 }
3594 
3595 /**
3596  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3597  * @state: atomic state
3598  * @crtc: the CRTC for which to update the active DPLL
3599  * @encoder: encoder determining the type of port DPLL
3600  *
3601  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3602  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3603  * DPLL selected will be based on the current mode of the encoder's port.
3604  */
3605 void intel_update_active_dpll(struct intel_atomic_state *state,
3606 			      struct intel_crtc *crtc,
3607 			      struct intel_encoder *encoder)
3608 {
3609 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3610 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3611 
3612 	if (WARN_ON(!dpll_mgr))
3613 		return;
3614 
3615 	dpll_mgr->update_active_dpll(state, crtc, encoder);
3616 }
3617 
3618 /**
3619  * intel_dpll_dump_hw_state - write hw_state to dmesg
3620  * @dev_priv: i915 drm device
3621  * @hw_state: hw state to be written to the log
3622  *
3623  * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3624  */
3625 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3626 			      const struct intel_dpll_hw_state *hw_state)
3627 {
3628 	if (dev_priv->dpll_mgr) {
3629 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3630 	} else {
3631 		/* fallback for platforms that don't use the shared dpll
3632 		 * infrastructure
3633 		 */
3634 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3635 			      "fp0: 0x%x, fp1: 0x%x\n",
3636 			      hw_state->dpll,
3637 			      hw_state->dpll_md,
3638 			      hw_state->fp0,
3639 			      hw_state->fp1);
3640 	}
3641 }
3642