xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
39  * modset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
47 
48 static void
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 				  struct intel_shared_dpll_state *shared_dpll)
51 {
52 	enum intel_dpll_id i;
53 
54 	/* Copy shared dpll state */
55 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
56 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
57 
58 		shared_dpll[i] = pll->state;
59 	}
60 }
61 
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
64 {
65 	struct intel_atomic_state *state = to_intel_atomic_state(s);
66 
67 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
68 
69 	if (!state->dpll_set) {
70 		state->dpll_set = true;
71 
72 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
73 						  state->shared_dpll);
74 	}
75 
76 	return state->shared_dpll;
77 }
78 
/**
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @dev_priv: i915 device instance
 * @id: pll id
 *
 * Returns:
 * A pointer to the DPLL with @id
 *
 * Note: @id is not range-checked here; callers must pass a valid id
 * for this platform (0 <= @id < dev_priv->num_shared_dpll).
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id)
{
	return &dev_priv->shared_dplls[id];
}
93 
94 /**
95  * intel_get_shared_dpll_id - get the id of a DPLL
96  * @dev_priv: i915 device instance
97  * @pll: the DPLL
98  *
99  * Returns:
100  * The id of @pll
101  */
102 enum intel_dpll_id
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 			 struct intel_shared_dpll *pll)
105 {
106 	if (drm_WARN_ON(&dev_priv->drm, pll < dev_priv->shared_dplls ||
107 			pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
108 		return -1;
109 
110 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
111 }
112 
/* For ILK+ */
/*
 * Warn if @pll's hardware enable state does not match the expected @state.
 * A NULL @pll only triggers a warning; no hardware is touched in that case.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* Read the actual enable state back from the hardware. */
	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
130 
131 /**
132  * intel_prepare_shared_dpll - call a dpll's prepare hook
133  * @crtc_state: CRTC, and its state, which has a shared dpll
134  *
135  * This calls the PLL's prepare hook if it has one and if the PLL is not
136  * already enabled. The prepare hook is platform specific.
137  */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll_lock);
	/* A PLL being prepared must already have at least one reserving CRTC. */
	drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
	/* Only prepare while no CRTC is actively using the PLL. */
	if (!pll->active_mask) {
		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
		drm_WARN_ON(&dev_priv->drm, pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->info->funcs->prepare(dev_priv, pll);
	}
	mutex_unlock(&dev_priv->dpll_lock);
}
158 
159 /**
160  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
161  * @crtc_state: CRTC, and its state, which has a shared DPLL
162  *
163  * Enable the shared DPLL used by @crtc.
164  */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll_lock);
	old_mask = pll->active_mask;

	/* The CRTC must have reserved the PLL and not yet be marked active. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
		goto out;

	pll->active_mask |= crtc_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active %x, on? %d) for crtc %d\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id);

	if (old_mask) {
		/* Another CRTC already enabled the PLL; just sanity-check. */
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First active user: actually turn the PLL on. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}
204 
205 /**
206  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
207  * @crtc_state: CRTC, and its state, which has a shared DPLL
208  *
209  * Disable the shared DPLL used by @crtc.
210  */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);

	/* PCH only available on ILK+ */
	if (INTEL_GEN(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll_lock);
	/* The CRTC must currently be marked as an active user of the PLL. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active %x, on? %d) for crtc %d\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	pll->active_mask &= ~crtc_mask;
	/* Keep the PLL running while other CRTCs still use it. */
	if (pll->active_mask)
		goto out;

	/* Last active user gone: actually turn the PLL off. */
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}
248 
/*
 * Find a PLL from @dpll_mask to use for @crtc: prefer an in-use PLL whose
 * staged hw state matches @pll_state exactly (sharing), otherwise fall back
 * to the first unused PLL.  Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].crtc_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/*
		 * NOTE(review): memcmp() compares the whole struct, so any
		 * uninitialized padding/unused fields would defeat sharing —
		 * assumes callers zero dpll_hw_state first (hsw_get_dpll
		 * does); confirm for other callers.
		 */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].crtc_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
297 
298 static void
299 intel_reference_shared_dpll(struct intel_atomic_state *state,
300 			    const struct intel_crtc *crtc,
301 			    const struct intel_shared_dpll *pll,
302 			    const struct intel_dpll_hw_state *pll_state)
303 {
304 	struct drm_i915_private *i915 = to_i915(state->base.dev);
305 	struct intel_shared_dpll_state *shared_dpll;
306 	const enum intel_dpll_id id = pll->info->id;
307 
308 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
309 
310 	if (shared_dpll[id].crtc_mask == 0)
311 		shared_dpll[id].hw_state = *pll_state;
312 
313 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
314 		pipe_name(crtc->pipe));
315 
316 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
317 }
318 
319 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
320 					  const struct intel_crtc *crtc,
321 					  const struct intel_shared_dpll *pll)
322 {
323 	struct intel_shared_dpll_state *shared_dpll;
324 
325 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
326 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
327 }
328 
329 static void intel_put_dpll(struct intel_atomic_state *state,
330 			   struct intel_crtc *crtc)
331 {
332 	const struct intel_crtc_state *old_crtc_state =
333 		intel_atomic_get_old_crtc_state(state, crtc);
334 	struct intel_crtc_state *new_crtc_state =
335 		intel_atomic_get_new_crtc_state(state, crtc);
336 
337 	new_crtc_state->shared_dpll = NULL;
338 
339 	if (!old_crtc_state->shared_dpll)
340 		return;
341 
342 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
343 }
344 
345 /**
346  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
347  * @state: atomic state
348  *
349  * This is the dpll version of drm_atomic_helper_swap_state() since the
350  * helper does not handle driver-specific global state.
351  *
352  * For consistency with atomic helpers this function does a complete swap,
353  * i.e. it also puts the current state into @state, even though there is no
354  * need for that at this moment.
355  */
356 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
357 {
358 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
359 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
360 	enum intel_dpll_id i;
361 
362 	if (!state->dpll_set)
363 		return;
364 
365 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
366 		struct intel_shared_dpll *pll =
367 			&dev_priv->shared_dplls[i];
368 
369 		swap(pll->state, shared_dpll[i]);
370 	}
371 }
372 
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Report the PLL as off if display power is already down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	/* Read back the DPLL control and FP0/FP1 state. */
	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	/* The PLL counts as enabled iff its VCO enable bit is set. */
	return val & DPLL_VCO_ENABLE;
}
395 
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Program the FP0/FP1 state before the DPLL itself is enabled. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
404 
/* Warn unless the PCH reference clock is routed from at least one source. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* Only IBX/CPT PCHs have this reference clock arrangement. */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	/* Refclk counts as enabled if any source-select field is non-zero. */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
417 
/*
 * Enable a PCH DPLL.  The refclk must already be running, and the DPLL
 * register is deliberately written twice (see below).
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
441 
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Clear the whole DPLL control register, then let it settle. */
	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
451 
452 static bool ibx_get_dpll(struct intel_atomic_state *state,
453 			 struct intel_crtc *crtc,
454 			 struct intel_encoder *encoder)
455 {
456 	struct intel_crtc_state *crtc_state =
457 		intel_atomic_get_new_crtc_state(state, crtc);
458 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
459 	struct intel_shared_dpll *pll;
460 	enum intel_dpll_id i;
461 
462 	if (HAS_PCH_IBX(dev_priv)) {
463 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
464 		i = (enum intel_dpll_id) crtc->pipe;
465 		pll = &dev_priv->shared_dplls[i];
466 
467 		drm_dbg_kms(&dev_priv->drm,
468 			    "[CRTC:%d:%s] using pre-allocated %s\n",
469 			    crtc->base.base.id, crtc->base.name,
470 			    pll->info->name);
471 	} else {
472 		pll = intel_find_shared_dpll(state, crtc,
473 					     &crtc_state->dpll_hw_state,
474 					     BIT(DPLL_ID_PCH_PLL_B) |
475 					     BIT(DPLL_ID_PCH_PLL_A));
476 	}
477 
478 	if (!pll)
479 		return false;
480 
481 	/* reference the pll */
482 	intel_reference_shared_dpll(state, crtc,
483 				    pll, &crtc_state->dpll_hw_state);
484 
485 	crtc_state->shared_dpll = pll;
486 
487 	return true;
488 }
489 
/* Log the IBX-relevant dpll_hw_state fields for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
501 
/* Hooks for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
508 
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Write the full WRPLL control state, then let the clock settle. */
	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
518 
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* Single SPLL instance, so no per-id register indexing here. */
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
526 
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* Clear only the enable bit, preserving the divider configuration. */
	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
544 
545 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
546 				 struct intel_shared_dpll *pll)
547 {
548 	enum intel_dpll_id id = pll->info->id;
549 	u32 val;
550 
551 	val = intel_de_read(dev_priv, SPLL_CTL);
552 	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
553 	intel_de_posting_read(dev_priv, SPLL_CTL);
554 
555 	/*
556 	 * Try to set up the PCH reference clock once all DPLLs
557 	 * that depend on it have been shut down.
558 	 */
559 	if (dev_priv->pch_ssc_use & BIT(id))
560 		intel_init_pch_refclk(dev_priv);
561 }
562 
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Report the PLL as off if display power is already down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
583 
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Report the PLL as off if display power is already down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
603 
/* LC PLL frequency (presumably MHz), and a scaled form used with freq2k */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post-divider (P) search range: even values from 2 to 64 */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL divider triple: post (p), feedback (n2), reference (r2) */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
620 
/*
 * Return the PPM budget for the given pixel clock (in Hz).  Known clocks
 * are grouped into fixed-budget tables; anything unlisted gets the
 * default budget of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		unsigned int count;
		unsigned int budget;
	} tables[] = {
		{ budget_0, sizeof(budget_0) / sizeof(budget_0[0]), 0 },
		{ budget_1500, sizeof(budget_1500) / sizeof(budget_1500[0]), 1500 },
		{ budget_2000, sizeof(budget_2000) / sizeof(budget_2000[0]), 2000 },
		{ budget_4000, sizeof(budget_4000) / sizeof(budget_4000[0]), 4000 },
		{ budget_5000, sizeof(budget_5000) / sizeof(budget_5000[0]), 5000 },
	};
	unsigned int t, i;

	for (t = 0; t < sizeof(tables) / sizeof(tables[0]); t++)
		for (i = 0; i < tables[t].count; i++)
			if (tables[t].clocks[i] == clock)
				return tables[t].budget;

	/* Default budget for clocks not in any table. */
	return 1000;
}
694 
/*
 * Consider divider candidate (r2, n2, p) against the current *best and
 * replace *best when the candidate tracks freq2k more closely within the
 * given PPM budget (see the inline math below).  The first candidate
 * offered always wins.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	/* a vs c (resp. b vs d) compares budget against delta, cross-
	 * multiplied to avoid division; see the formula above. */
	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
754 
/*
 * Exhaustively search WRPLL divider values (r2, n2, p) so the produced
 * clock best matches @clock while keeping the reference and VCO inside
 * the REF_* / VCO_* constraints.  The 540 MHz pixel clock is special-
 * cased to pass the LC PLL through directly.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	/* freq2k is the target clock scaled down by 100. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
819 
820 static struct intel_shared_dpll *
821 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
822 		      struct intel_crtc *crtc)
823 {
824 	struct intel_crtc_state *crtc_state =
825 		intel_atomic_get_new_crtc_state(state, crtc);
826 	struct intel_shared_dpll *pll;
827 	u32 val;
828 	unsigned int p, n2, r2;
829 
830 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
831 
832 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
833 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
834 	      WRPLL_DIVIDER_POST(p);
835 
836 	crtc_state->dpll_hw_state.wrpll = val;
837 
838 	pll = intel_find_shared_dpll(state, crtc,
839 				     &crtc_state->dpll_hw_state,
840 				     BIT(DPLL_ID_WRPLL2) |
841 				     BIT(DPLL_ID_WRPLL1));
842 
843 	if (!pll)
844 		return NULL;
845 
846 	return pll;
847 }
848 
849 static struct intel_shared_dpll *
850 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
851 {
852 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
853 	struct intel_shared_dpll *pll;
854 	enum intel_dpll_id pll_id;
855 	int clock = crtc_state->port_clock;
856 
857 	switch (clock / 2) {
858 	case 81000:
859 		pll_id = DPLL_ID_LCPLL_810;
860 		break;
861 	case 135000:
862 		pll_id = DPLL_ID_LCPLL_1350;
863 		break;
864 	case 270000:
865 		pll_id = DPLL_ID_LCPLL_2700;
866 		break;
867 	default:
868 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
869 			    clock);
870 		return NULL;
871 	}
872 
873 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
874 
875 	if (!pll)
876 		return NULL;
877 
878 	return pll;
879 }
880 
static bool hsw_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* Start from zeroed state so memcmp-based PLL sharing works. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* Pick the PLL family based on the output type. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		pll = hsw_ddi_dp_get_dpll(crtc_state);
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		/* Analog (CRT) only works off the 1350 MHz SPLL setting. */
		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
			return false;

		crtc_state->dpll_hw_state.spll =
			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SPLL));
	} else {
		return false;
	}

	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
920 
/* Log the HSW-relevant dpll_hw_state fields for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
927 
/* Hooks for the HSW WRPLLs (no prepare step needed). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
};

/* Hooks for the HSW SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
};
939 
/*
 * The LCPLL hooks are intentional no-ops: this PLL is not enabled or
 * disabled through the shared-DPLL framework, and get_hw_state simply
 * always reports it as enabled.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}

static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
};
962 
/* Per-PLL control/config register triple for SKL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
		/* (cfgcr1/cfgcr2 left zero-initialized for DPLL 0) */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
993 
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* Each PLL owns a 6-bit field in shared DPLL_CTRL1; replace only ours. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1010 
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* Shared CTRL1 config first, then the per-PLL CFGCR registers. */
	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* Wait (timeout 5) for the PLL's lock bit before declaring success. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1031 
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	/*
	 * Only CTRL1 is programmed for DPLL0; its enable bit is managed
	 * elsewhere (note skl_ddi_dpll0_disable() is a no-op too).
	 */
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1037 
/* Disable a SKL DPLL (DPLL1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1049 
/*
 * Intentionally a no-op: DPLL0 drives CDCLK (see
 * skl_ddi_dpll0_get_hw_state()) and is left running.
 */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1054 
/*
 * Read the current hardware state of a SKL DPLL (DPLL1-3) into
 * @hw_state. Returns false if display power is not enabled or the PLL
 * is off. Holds a display-core power reference around the register
 * reads.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Each PLL owns 6 bits of DPLL_CTRL1, starting at bit id * 6. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1091 
/*
 * Read the hardware state of DPLL0. Unlike DPLL1-3 no CFGCR readback
 * is done (DPLL0 doesn't support HDMI mode, see skl_dpll_regs), and the
 * PLL being disabled is a WARN condition since it must stay on to drive
 * CDCLK.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* Each PLL owns 6 bits of DPLL_CTRL1, starting at bit id * 6. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1124 
/* Best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1131 
1132 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1133 {
1134 	memset(ctx, 0, sizeof(*ctx));
1135 
1136 	ctx->min_deviation = U64_MAX;
1137 }
1138 
1139 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1140 #define SKL_DCO_MAX_PDEVIATION	100
1141 #define SKL_DCO_MAX_NDEVIATION	600
1142 
1143 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1144 				  u64 central_freq,
1145 				  u64 dco_freq,
1146 				  unsigned int divider)
1147 {
1148 	u64 deviation;
1149 
1150 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1151 			      central_freq);
1152 
1153 	/* positive deviation */
1154 	if (dco_freq >= central_freq) {
1155 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1156 		    deviation < ctx->min_deviation) {
1157 			ctx->min_deviation = deviation;
1158 			ctx->central_freq = central_freq;
1159 			ctx->dco_freq = dco_freq;
1160 			ctx->p = divider;
1161 		}
1162 	/* negative deviation */
1163 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1164 		   deviation < ctx->min_deviation) {
1165 		ctx->min_deviation = deviation;
1166 		ctx->central_freq = central_freq;
1167 		ctx->dco_freq = dco_freq;
1168 		ctx->p = divider;
1169 	}
1170 }
1171 
/*
 * Split the overall divider p into the three hardware dividers
 * P0 (pdiv), P1 (qdiv) and P2 (kdiv) so that p = p0 * p1 * p2.
 * Only the divider values produced by the WRPLL search are handled;
 * any other value leaves the outputs untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers */
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1220 
/*
 * WRPLL configuration produced by skl_ddi_calculate_wrpll(); the fields
 * hold the encoded values for the DPLL_CFGCR1/CFGCR2 registers (see
 * skl_ddi_hdmi_pll_dividers()).
 */
struct skl_wrpll_params {
	u32 dco_fraction;	/* DCO frequency, fractional part */
	u32 dco_integer;	/* DCO frequency, integer part */
	u32 qdiv_ratio;		/* Q divider (P1) ratio */
	u32 qdiv_mode;		/* 1 when the Q divider is in use */
	u32 kdiv;		/* encoded K divider (P2) */
	u32 pdiv;		/* encoded P divider (P0) */
	u32 central_freq;	/* encoded DCO central frequency */
};
1230 
1231 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1232 				      u64 afe_clock,
1233 				      u64 central_freq,
1234 				      u32 p0, u32 p1, u32 p2)
1235 {
1236 	u64 dco_freq;
1237 
1238 	switch (central_freq) {
1239 	case 9600000000ULL:
1240 		params->central_freq = 0;
1241 		break;
1242 	case 9000000000ULL:
1243 		params->central_freq = 1;
1244 		break;
1245 	case 8400000000ULL:
1246 		params->central_freq = 3;
1247 	}
1248 
1249 	switch (p0) {
1250 	case 1:
1251 		params->pdiv = 0;
1252 		break;
1253 	case 2:
1254 		params->pdiv = 1;
1255 		break;
1256 	case 3:
1257 		params->pdiv = 2;
1258 		break;
1259 	case 7:
1260 		params->pdiv = 4;
1261 		break;
1262 	default:
1263 		WARN(1, "Incorrect PDiv\n");
1264 	}
1265 
1266 	switch (p2) {
1267 	case 5:
1268 		params->kdiv = 0;
1269 		break;
1270 	case 2:
1271 		params->kdiv = 1;
1272 		break;
1273 	case 3:
1274 		params->kdiv = 2;
1275 		break;
1276 	case 1:
1277 		params->kdiv = 3;
1278 		break;
1279 	default:
1280 		WARN(1, "Incorrect KDiv\n");
1281 	}
1282 
1283 	params->qdiv_ratio = p1;
1284 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1285 
1286 	dco_freq = p0 * p1 * p2 * afe_clock;
1287 
1288 	/*
1289 	 * Intermediate values are in Hz.
1290 	 * Divide by MHz to match bsepc
1291 	 */
1292 	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1293 	params->dco_fraction =
1294 		div_u64((div_u64(dco_freq, 24) -
1295 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1296 }
1297 
/*
 * Find WRPLL dividers for the given pixel clock (in Hz): every
 * supported divider is tried against each of the three DCO central
 * frequencies, preferring even dividers, and @wrpll_params is populated
 * from the best candidate. Returns false if no divider keeps the DCO
 * within its allowed deviation band.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		/* even dividers first: they are preferred (see below) */
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
				  p0, p1, p2);

	return true;
}
1370 
/*
 * Compute the SKL DPLL state (ctrl1/cfgcr1/cfgcr2) for HDMI output.
 * port_clock is in kHz and converted to Hz for the WRPLL search.
 * Returns false if no valid WRPLL dividers exist for the clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1406 
/*
 * Compute the SKL DPLL state (ctrl1 only) for DP output. The link rate
 * is selected from half the port clock; unknown rates leave no
 * link-rate bits set in ctrl1.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1446 
/*
 * Compute the DPLL state for @crtc and reserve a shared DPLL for it.
 * eDP is restricted to DPLL0 (which also drives CDCLK); other outputs
 * pick from DPLL1-3. Returns false if state computation or PLL
 * allocation fails.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		/* unsupported output type */
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1495 
/* Log the SKL DPLL register state for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1505 
/* Hooks for the freely-allocatable SKL DPLLs (DPLL1-3). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
};
1511 
/* Hooks for DPLL0, which stays enabled since it drives CDCLK. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
};
1517 
/*
 * Enable a BXT/GLK port PLL: program the PHY PLL dividers, loop filter
 * and lane stagger settings from the precomputed hw_state, then set the
 * enable bit and wait for lock. On BXT ports map 1:1 to PLLs, so the
 * PLL id doubles as the port number.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK requires PLL power-up before programming; BXT does not. */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Reuses the just-written value instead of reading back. */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1638 
/*
 * Disable a BXT/GLK port PLL: clear the enable bit, and on GLK also
 * power the PLL back down.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1661 
/*
 * Read the current hardware state of a BXT/GLK port PLL into @hw_state,
 * masking each register down to the bits this driver programs. Returns
 * false if display power is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1741 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* matched against crtc_state->port_clock */
	u32 p1;			/* post dividers */
	u32 p2;
	u32 m2_int;		/* M2 divider, integer part */
	u32 m2_frac;		/* M2 divider, fractional part */
	bool m2_frac_en;	/* enable the M2 fractional part */
	u32 n;			/* N divider */

	int vco;		/* resulting VCO frequency */
};
1754 
/* pre-calculated values for DP linkrates */
/* columns: clock, p1, p2, m2_int, m2_frac, m2_frac_en, n */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
1765 
1766 static bool
1767 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1768 			  struct bxt_clk_div *clk_div)
1769 {
1770 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1771 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1772 	struct dpll best_clock;
1773 
1774 	/* Calculate HDMI div */
1775 	/*
1776 	 * FIXME: tie the following calculation into
1777 	 * i9xx_crtc_compute_clock
1778 	 */
1779 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1780 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
1781 			crtc_state->port_clock,
1782 			pipe_name(crtc->pipe));
1783 		return false;
1784 	}
1785 
1786 	clk_div->p1 = best_clock.p1;
1787 	clk_div->p2 = best_clock.p2;
1788 	WARN_ON(best_clock.m1 != 2);
1789 	clk_div->n = best_clock.n;
1790 	clk_div->m2_int = best_clock.m2 >> 22;
1791 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1792 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
1793 
1794 	clk_div->vco = best_clock.vco;
1795 
1796 	return true;
1797 }
1798 
1799 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1800 				    struct bxt_clk_div *clk_div)
1801 {
1802 	int clock = crtc_state->port_clock;
1803 	int i;
1804 
1805 	*clk_div = bxt_dp_clk_val[0];
1806 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1807 		if (bxt_dp_clk_val[i].clock == clock) {
1808 			*clk_div = bxt_dp_clk_val[i];
1809 			break;
1810 		}
1811 	}
1812 
1813 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1814 }
1815 
/*
 * Translate the computed clk_div into BXT port PLL register values in
 * crtc_state->dpll_hw_state. The loop filter coefficients are chosen
 * from the VCO range and the lane stagger value from the port clock.
 * Returns false for a VCO outside the supported ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Loop filter settings per VCO range; vco == 5400000 is special. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger value scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
1885 
/*
 * Fill crtc_state->dpll_hw_state for DP output from the pre-computed
 * divider tables.
 */
static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
1895 
1896 static bool
1897 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1898 {
1899 	struct bxt_clk_div clk_div = {};
1900 
1901 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1902 
1903 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1904 }
1905 
/*
 * Compute the PLL state for @crtc and take a reference on the port's
 * PLL. On BXT ports map 1:1 to PLLs, so no allocation search is needed.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1938 
1939 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1940 			      const struct intel_dpll_hw_state *hw_state)
1941 {
1942 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1943 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1944 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1945 		    hw_state->ebb0,
1946 		    hw_state->ebb4,
1947 		    hw_state->pll0,
1948 		    hw_state->pll1,
1949 		    hw_state->pll2,
1950 		    hw_state->pll3,
1951 		    hw_state->pll6,
1952 		    hw_state->pll8,
1953 		    hw_state->pll9,
1954 		    hw_state->pll10,
1955 		    hw_state->pcsdw12);
1956 }
1957 
/* Hooks for the BXT/GLK per-port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
};
1963 
/*
 * Per-platform DPLL manager: the platform's PLL table plus the hooks
 * used by the atomic code to compute, reference and release DPLLs.
 */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;

	/* compute state and reserve PLL(s) for @crtc; false on failure */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional; not set by all platforms (see the *_pll_mgr tables) */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* log the given hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
1978 
/* PCH DPLLs. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
1991 
/* HSW/BDW DPLLs; the LCPLLs are flagged always-on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = hsw_dump_hw_state,
};
2008 
/* SKL DPLLs; DPLL0 also drives CDCLK and is therefore always on. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = skl_dump_hw_state,
};
2023 
/* BXT port PLLs (one per port A/B/C); entries reuse the SKL DPLL ids. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },	/* sentinel */
};
2030 
/* DPLL manager for BXT port PLLs. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = bxt_dump_hw_state,
};
2037 
/*
 * Enable a CNL DPLL: power up, program CFGCR0 (and CFGCR1 in HDMI mode),
 * then enable and wait for lock.  Step numbering follows the programming
 * sequence; the DVFS steps (5 and 8) are handled by the cdclk code, and
 * the DDI clock mapping (step 9) by intel_ddi_clk_select().
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* 3 (cont.): Configure DPLL_CFGCR1 */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2105 
/*
 * Disable a CNL DPLL: the enable sequence in reverse — disable the PLL,
 * wait for it to unlock, then remove PLL power.  DDI clock gating and
 * DVFS steps are handled elsewhere (ddi/cdclk code).
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2154 
/*
 * Read the current CNL DPLL configuration from hardware into @hw_state.
 * Returns false if the display-core power domain is off or the PLL is
 * not enabled; on success CFGCR0 is always filled, CFGCR1 only in HDMI
 * mode.  Holds a display-core power reference for the duration.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2190 
/*
 * Split @bestdiv into a P/Q/K divider triple such that P * Q * K equals
 * @bestdiv.  Only dividers from the cnl_ddi_calculate_wrpll() candidate
 * list are handled; other even values leave the outputs untouched.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2229 
2230 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2231 				      u32 dco_freq, u32 ref_freq,
2232 				      int pdiv, int qdiv, int kdiv)
2233 {
2234 	u32 dco;
2235 
2236 	switch (kdiv) {
2237 	case 1:
2238 		params->kdiv = 1;
2239 		break;
2240 	case 2:
2241 		params->kdiv = 2;
2242 		break;
2243 	case 3:
2244 		params->kdiv = 4;
2245 		break;
2246 	default:
2247 		WARN(1, "Incorrect KDiv\n");
2248 	}
2249 
2250 	switch (pdiv) {
2251 	case 2:
2252 		params->pdiv = 1;
2253 		break;
2254 	case 3:
2255 		params->pdiv = 2;
2256 		break;
2257 	case 5:
2258 		params->pdiv = 4;
2259 		break;
2260 	case 7:
2261 		params->pdiv = 8;
2262 		break;
2263 	default:
2264 		WARN(1, "Incorrect PDiv\n");
2265 	}
2266 
2267 	WARN_ON(kdiv != 2 && qdiv != 1);
2268 
2269 	params->qdiv_ratio = qdiv;
2270 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2271 
2272 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2273 
2274 	params->dco_integer = dco >> 15;
2275 	params->dco_fraction = dco & 0x7fff;
2276 }
2277 
2278 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2279 {
2280 	int ref_clock = dev_priv->cdclk.hw.ref;
2281 
2282 	/*
2283 	 * For ICL+, the spec states: if reference frequency is 38.4,
2284 	 * use 19.2 because the DPLL automatically divides that by 2.
2285 	 */
2286 	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2287 		ref_clock = 19200;
2288 
2289 	return ref_clock;
2290 }
2291 
2292 static bool
2293 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2294 			struct skl_wrpll_params *wrpll_params)
2295 {
2296 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2297 	u32 afe_clock = crtc_state->port_clock * 5;
2298 	u32 ref_clock;
2299 	u32 dco_min = 7998000;
2300 	u32 dco_max = 10000000;
2301 	u32 dco_mid = (dco_min + dco_max) / 2;
2302 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2303 					 18, 20, 24, 28, 30, 32,  36,  40,
2304 					 42, 44, 48, 50, 52, 54,  56,  60,
2305 					 64, 66, 68, 70, 72, 76,  78,  80,
2306 					 84, 88, 90, 92, 96, 98, 100, 102,
2307 					  3,  5,  7,  9, 15, 21 };
2308 	u32 dco, best_dco = 0, dco_centrality = 0;
2309 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2310 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2311 
2312 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2313 		dco = afe_clock * dividers[d];
2314 
2315 		if ((dco <= dco_max) && (dco >= dco_min)) {
2316 			dco_centrality = abs(dco - dco_mid);
2317 
2318 			if (dco_centrality < best_dco_centrality) {
2319 				best_dco_centrality = dco_centrality;
2320 				best_div = dividers[d];
2321 				best_dco = dco;
2322 			}
2323 		}
2324 	}
2325 
2326 	if (best_div == 0)
2327 		return false;
2328 
2329 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2330 
2331 	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2332 
2333 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2334 				  pdiv, qdiv, kdiv);
2335 
2336 	return true;
2337 }
2338 
2339 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2340 {
2341 	u32 cfgcr0, cfgcr1;
2342 	struct skl_wrpll_params wrpll_params = { 0, };
2343 
2344 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2345 
2346 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2347 		return false;
2348 
2349 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2350 		wrpll_params.dco_integer;
2351 
2352 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2353 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2354 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2355 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2356 		DPLL_CFGCR1_CENTRAL_FREQ;
2357 
2358 	memset(&crtc_state->dpll_hw_state, 0,
2359 	       sizeof(crtc_state->dpll_hw_state));
2360 
2361 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2362 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2363 	return true;
2364 }
2365 
/*
 * Encode a DP link rate into CFGCR0 (SSC enabled + per-rate LINK_RATE
 * field) and store it in the CRTC's dpll_hw_state.  Always returns true.
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	/*
	 * NOTE(review): no default case — an unexpected port_clock silently
	 * leaves only the SSC enable bit set.  Confirm callers only pass
	 * the rates listed below.
	 */
	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2410 
/*
 * Compute the DPLL state for the new CRTC state (WRPLL dividers for
 * HDMI, link-rate encoding for DP), then reserve one of DPLL0-2 whose
 * state matches or which is unused.  Returns false if no state could be
 * computed or no PLL is available.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		drm_dbg_kms(&i915->drm,
			    "Skip DPLL setup for output_types 0x%x\n",
			    crtc_state->output_types);
		return false;
	}

	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		drm_dbg_kms(&i915->drm, "No PLL selected\n");
		return false;
	}

	/* Track this CRTC as a user of the selected PLL. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2459 
/* Log the CNL CFGCR0/CFGCR1 values of a DPLL hw state for debugging. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2468 
/* Hardware enable/disable/readout hooks shared by all CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
};
2474 
/* CNL: three allocatable DPLLs sharing the SKL DPLL ids. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },	/* sentinel */
};
2481 
/* DPLL manager for CNL. */
static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = cnl_dump_hw_state,
};
2488 
/*
 * Pairs a link clock (compared against crtc_state->port_clock) with its
 * precomputed combo PHY PLL parameters.
 */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2493 
2494 /*
 * These values are already adjusted: they're the bits we write to the
2496  * registers, not the logical values.
2497  */
/* Combo PHY DP PLL parameters for a 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2524 
2525 
/*
 * Combo PHY DP PLL parameters for a 19.2 MHz reference clock.  Also used
 * for 38.4 MHz references (the DPLL divides 38.4 down to 19.2).
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2553 
/* TBT PLL parameters are fixed per reference clock (ICL and TGL variants). */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2575 
/*
 * Look up the precomputed combo PHY PLL parameters for a DP link rate.
 * Returns false (after logging a MISSING_CASE) for rates not in the
 * table for the current reference clock.
 */
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				  struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const struct icl_combo_pll_params *params =
		dev_priv->cdclk.hw.ref == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	/*
	 * NOTE(review): the loop bound uses the 24MHz table's size for both
	 * tables; this is only correct while both tables keep the same
	 * number of entries.
	 */
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return true;
		}
	}

	MISSING_CASE(clock);
	return false;
}
2597 
2598 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2599 			     struct skl_wrpll_params *pll_params)
2600 {
2601 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2602 
2603 	if (INTEL_GEN(dev_priv) >= 12) {
2604 		switch (dev_priv->cdclk.hw.ref) {
2605 		default:
2606 			MISSING_CASE(dev_priv->cdclk.hw.ref);
2607 			/* fall-through */
2608 		case 19200:
2609 		case 38400:
2610 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2611 			break;
2612 		case 24000:
2613 			*pll_params = tgl_tbt_pll_24MHz_values;
2614 			break;
2615 		}
2616 	} else {
2617 		switch (dev_priv->cdclk.hw.ref) {
2618 		default:
2619 			MISSING_CASE(dev_priv->cdclk.hw.ref);
2620 			/* fall-through */
2621 		case 19200:
2622 		case 38400:
2623 			*pll_params = icl_tbt_pll_19_2MHz_values;
2624 			break;
2625 		case 24000:
2626 			*pll_params = icl_tbt_pll_24MHz_values;
2627 			break;
2628 		}
2629 	}
2630 
2631 	return true;
2632 }
2633 
2634 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2635 				struct intel_encoder *encoder,
2636 				struct intel_dpll_hw_state *pll_state)
2637 {
2638 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2639 	u32 cfgcr0, cfgcr1;
2640 	struct skl_wrpll_params pll_params = { 0 };
2641 	bool ret;
2642 
2643 	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2644 							encoder->port)))
2645 		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2646 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2647 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2648 		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2649 	else
2650 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2651 
2652 	if (!ret)
2653 		return false;
2654 
2655 	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2656 		 pll_params.dco_integer;
2657 
2658 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2659 		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2660 		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2661 		 DPLL_CFGCR1_PDIV(pll_params.pdiv);
2662 
2663 	if (INTEL_GEN(dev_priv) >= 12)
2664 		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2665 	else
2666 		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2667 
2668 	memset(pll_state, 0, sizeof(*pll_state));
2669 
2670 	pll_state->cfgcr0 = cfgcr0;
2671 	pll_state->cfgcr1 = cfgcr1;
2672 
2673 	return true;
2674 }
2675 
2676 
/* MG PHY PLL ids are contiguous, one per TypeC port, starting at MGPLL1. */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return id - DPLL_ID_ICL_MGPLL1;
}
2681 
/* Inverse of icl_pll_id_to_tc_port(): map a TypeC port to its MG PLL id. */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port + DPLL_ID_ICL_MGPLL1;
}
2686 
/*
 * Search for hsdiv (div1) and dsdiv (div2) values that put the DCO
 * frequency (clock * 5 * div1 * div2) inside the valid range, trying the
 * largest div1 first.  On success, fills the refclkin/clktop2 register
 * fields of @state and returns the chosen DCO frequency via
 * @target_dco_khz.  Returns false if no divider combination fits.
 */
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				     u32 *target_dco_khz,
				     struct intel_dpll_hw_state *state,
				     bool is_dkl)
{
	u32 dco_min_freq, dco_max_freq;
	int div1_vals[] = {7, 5, 3, 2};
	unsigned int i;
	int div2;

	/* DP requires an exact 8.1 GHz DCO; HDMI has a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			switch (div1) {
			default:
				MISSING_CASE(div1);
				/* fall through */
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return true;
		}
	}

	return false;
}
2762 
2763 /*
2764  * The specification for this function uses real numbers, so the math had to be
2765  * adapted to integer-only calculation, that's why it looks so different.
2766  */
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				  struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int refclk_khz = dev_priv->cdclk.hw.ref;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = INTEL_GEN(dev_priv) >= 12;

	memset(pll_state, 0, sizeof(*pll_state));

	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				      pll_state, is_dkl)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to find divisors for clock %d\n", clock);
		return false;
	}

	/* Feedback predivider M1: try 2 first; pre-DKL may fall back to 4. */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255) {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to find mdiv for clock %d\n",
				    clock);
			return false;
		}
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional part of M2, in 22-bit fixed point. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* NOTE(review): iref constants look bspec-derived — confirm against spec. */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return false;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* Pack the computed values into the register layout (DKL vs MG PHY). */
	if (is_dkl) {
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/* With a 38.4 MHz refclk only COLDSTART is compared/restored. */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return true;
}
2971 
2972 /**
2973  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2974  * @crtc_state: state for the CRTC to select the DPLL for
2975  * @port_dpll_id: the active @port_dpll_id to select
2976  *
2977  * Select the given @port_dpll_id instance from the DPLLs reserved for the
2978  * CRTC.
2979  */
2980 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2981 			      enum icl_port_dpll_id port_dpll_id)
2982 {
2983 	struct icl_port_dpll *port_dpll =
2984 		&crtc_state->icl_port_dplls[port_dpll_id];
2985 
2986 	crtc_state->shared_dpll = port_dpll->pll;
2987 	crtc_state->dpll_hw_state = port_dpll->hw_state;
2988 }
2989 
/*
 * Pick which of the reserved port DPLLs becomes active for the CRTC: the
 * MG PHY PLL when the (primary) TypeC port is in DP-alt or legacy mode,
 * the default PLL otherwise.  For MST the mode is taken from the primary
 * digital port.
 */
static void icl_update_active_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_digital_port *primary_port;
	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;

	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
		enc_to_mst(encoder)->primary :
		enc_to_dig_port(encoder);

	if (primary_port &&
	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
	     primary_port->tc_mode == TC_PORT_LEGACY))
		port_dpll_id = ICL_PORT_DPLL_MG_PHY;

	icl_set_active_port_dpll(crtc_state, port_dpll_id);
}
3010 
/*
 * Reserve a combo PHY DPLL for @crtc/@encoder: calculate the PLL state,
 * find a matching/unused PLL from the platform's combo PLL pool, take a
 * reference on it and select it as the active DPLL.
 *
 * Returns true on success, false if no suitable PLL state or instance
 * could be found.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;

	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	/* On EHL, ports other than A may additionally use DPLL4. */
	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	else
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3055 
/*
 * Reserve both DPLLs a TC port may need: the TBT PLL (default) and the MG
 * PHY PLL matching the port's TC instance. Both are referenced; which one
 * is active is decided by icl_update_active_dpll() based on the port mode.
 *
 * On failure to get the MG PLL the already taken TBT PLL reference is
 * dropped via the goto-cleanup path. Returns true on success.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First the TBT PLL, stored in the DEFAULT slot. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then the MG PHY PLL dedicated to this port's TC instance. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Undo the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3113 
3114 static bool icl_get_dplls(struct intel_atomic_state *state,
3115 			  struct intel_crtc *crtc,
3116 			  struct intel_encoder *encoder)
3117 {
3118 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3119 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3120 
3121 	if (intel_phy_is_combo(dev_priv, phy))
3122 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3123 	else if (intel_phy_is_tc(dev_priv, phy))
3124 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3125 
3126 	MISSING_CASE(phy);
3127 
3128 	return false;
3129 }
3130 
3131 static void icl_put_dplls(struct intel_atomic_state *state,
3132 			  struct intel_crtc *crtc)
3133 {
3134 	const struct intel_crtc_state *old_crtc_state =
3135 		intel_atomic_get_old_crtc_state(state, crtc);
3136 	struct intel_crtc_state *new_crtc_state =
3137 		intel_atomic_get_new_crtc_state(state, crtc);
3138 	enum icl_port_dpll_id id;
3139 
3140 	new_crtc_state->shared_dpll = NULL;
3141 
3142 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3143 		const struct icl_port_dpll *old_port_dpll =
3144 			&old_crtc_state->icl_port_dplls[id];
3145 		struct icl_port_dpll *new_port_dpll =
3146 			&new_crtc_state->icl_port_dplls[id];
3147 
3148 		new_port_dpll->pll = NULL;
3149 
3150 		if (!old_port_dpll->pll)
3151 			continue;
3152 
3153 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3154 	}
3155 }
3156 
/*
 * Read out the current hardware state of an ICL MG PHY PLL into @hw_state.
 *
 * Only the bits the driver programs are kept (reserved fields are masked
 * off) so the result can be compared against a computed state. Returns
 * false if display power is off or the PLL is not enabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The comparable BIAS/TDC_COLDST_BIAS bits depend on the reference
	 * clock; mirror the mask choice made in icl_calc_mg_pll_state().
	 */
	if (dev_priv->cdclk.hw.ref == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3220 
/*
 * Read out the current hardware state of a TGL Dekel PHY PLL into
 * @hw_state, masking each register down to the bits the driver owns.
 * Returns false if display power is off or the PLL is not enabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3294 
/*
 * Read out the CFGCR0/CFGCR1 state of a combo/TBT PLL into @hw_state,
 * checking PLL_ENABLE via @enable_reg first. The CFGCR register location
 * differs per platform (TGL vs. EHL DPLL4 vs. ICL). Returns false if
 * display power is off or the PLL is not enabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (INTEL_GEN(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* EHL DPLL4 uses the CFGCR registers at index 4. */
		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3338 
3339 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3340 				   struct intel_shared_dpll *pll,
3341 				   struct intel_dpll_hw_state *hw_state)
3342 {
3343 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3344 
3345 	if (IS_ELKHARTLAKE(dev_priv) &&
3346 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3347 		enable_reg = MG_PLL_ENABLE(0);
3348 	}
3349 
3350 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3351 }
3352 
/* TBT PLL readout: same as combo readout but via TBT_PLL_ENABLE. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3359 
/*
 * Program a combo/TBT PLL's CFGCR0/CFGCR1 registers from the PLL's cached
 * state. Register location selection mirrors icl_pll_get_hw_state().
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (INTEL_GEN(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* EHL DPLL4 uses the CFGCR registers at index 4. */
		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* Posting read to flush the writes before enabling the PLL. */
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3384 
/*
 * Program an ICL MG PHY PLL's registers from the PLL's cached state.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC_COLDST_BIAS masks are refclk dependent, see above. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the writes before enabling the PLL. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3435 
/*
 * Program a TGL Dekel PHY PLL's registers from the PLL's cached state.
 * Every register is updated read-modify-write so reserved bits keep their
 * hardware values.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the writes before enabling the PLL. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3505 
/*
 * Set PLL_POWER_ENABLE in @enable_reg and wait for the hardware to report
 * PLL_POWER_STATE. Logs an error on timeout instead of failing.
 */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	u32 val;

	val = intel_de_read(dev_priv, enable_reg);
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3524 
/*
 * Set PLL_ENABLE in @enable_reg and wait for PLL_LOCK. Logs an error on
 * timeout instead of failing.
 */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	u32 val;

	val = intel_de_read(dev_priv, enable_reg);
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
3539 
/*
 * Enable a combo PLL: power it up, program its state, then enable it and
 * wait for lock. For EHL's DPLL4 a power domain reference is taken first
 * to keep DC states disabled while this PLL is in use; the matching
 * release is in combo_pll_disable().
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);

	if (IS_ELKHARTLAKE(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		enable_reg = MG_PLL_ENABLE(0);

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3572 
/*
 * Enable the TBT PLL: power up, program state, enable and wait for lock.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3590 
/*
 * Enable a Type-C port PLL (MG PHY on gen11, Dekel PHY on gen12+):
 * power up, program the PHY-specific registers, enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Gen12+ TC ports use Dekel PHY registers, gen11 uses MG PHY. */
	if (INTEL_GEN(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3614 
/*
 * Disable a PLL through @enable_reg: clear PLL_ENABLE, wait for unlock,
 * then clear PLL_POWER_ENABLE and wait for power-down. Timeouts are
 * logged, not propagated.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3651 
3652 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3653 			      struct intel_shared_dpll *pll)
3654 {
3655 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3656 
3657 	if (IS_ELKHARTLAKE(dev_priv) &&
3658 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3659 		enable_reg = MG_PLL_ENABLE(0);
3660 		icl_pll_disable(dev_priv, pll, enable_reg);
3661 
3662 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3663 					pll->wakeref);
3664 		return;
3665 	}
3666 
3667 	icl_pll_disable(dev_priv, pll, enable_reg);
3668 }
3669 
/* Disable the TBT PLL via its dedicated enable register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3675 
/* Disable a Type-C port PLL via its per-TC-port enable register. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));

	icl_pll_disable(dev_priv, pll, enable_reg);
}
3684 
3685 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3686 			      const struct intel_dpll_hw_state *hw_state)
3687 {
3688 	drm_dbg_kms(&dev_priv->drm,
3689 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3690 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3691 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3692 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3693 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3694 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3695 		    hw_state->cfgcr0, hw_state->cfgcr1,
3696 		    hw_state->mg_refclkin_ctl,
3697 		    hw_state->mg_clktop2_coreclkctl1,
3698 		    hw_state->mg_clktop2_hsclkctl,
3699 		    hw_state->mg_pll_div0,
3700 		    hw_state->mg_pll_div1,
3701 		    hw_state->mg_pll_lf,
3702 		    hw_state->mg_pll_frac_lock,
3703 		    hw_state->mg_pll_ssc,
3704 		    hw_state->mg_pll_bias,
3705 		    hw_state->mg_pll_tdc_coldst_bias);
3706 }
3707 
/* Ops for combo PHY PLLs (ICL/EHL/TGL DPLL0/1, EHL DPLL4). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
};
3713 
/* Ops for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
};
3719 
/* Ops for ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
};
3725 
/* Ice Lake PLL table; array index must match the DPLL id. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
3736 
/* Ice Lake DPLL manager. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
3744 
/* Elkhart Lake PLL table (combo PHYs only, no TC ports). */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
3751 
/* Elkhart Lake DPLL manager; no TC ports, so no update_active_dpll. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.dump_hw_state = icl_dump_hw_state,
};
3758 
/*
 * Ops for TGL Dekel PHY (Type-C) PLLs; enable/disable are shared with the
 * MG PHY (mg_pll_enable() branches on gen), only readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
};
3764 
/* Tiger Lake PLL table; array index must match the DPLL id. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
3777 
/* Tiger Lake DPLL manager. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
3785 
3786 /**
3787  * intel_shared_dpll_init - Initialize shared DPLLs
3788  * @dev: drm device
3789  *
3790  * Initialize shared DPLLs for @dev.
3791  */
3792 void intel_shared_dpll_init(struct drm_device *dev)
3793 {
3794 	struct drm_i915_private *dev_priv = to_i915(dev);
3795 	const struct intel_dpll_mgr *dpll_mgr = NULL;
3796 	const struct dpll_info *dpll_info;
3797 	int i;
3798 
3799 	if (INTEL_GEN(dev_priv) >= 12)
3800 		dpll_mgr = &tgl_pll_mgr;
3801 	else if (IS_ELKHARTLAKE(dev_priv))
3802 		dpll_mgr = &ehl_pll_mgr;
3803 	else if (INTEL_GEN(dev_priv) >= 11)
3804 		dpll_mgr = &icl_pll_mgr;
3805 	else if (IS_CANNONLAKE(dev_priv))
3806 		dpll_mgr = &cnl_pll_mgr;
3807 	else if (IS_GEN9_BC(dev_priv))
3808 		dpll_mgr = &skl_pll_mgr;
3809 	else if (IS_GEN9_LP(dev_priv))
3810 		dpll_mgr = &bxt_pll_mgr;
3811 	else if (HAS_DDI(dev_priv))
3812 		dpll_mgr = &hsw_pll_mgr;
3813 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3814 		dpll_mgr = &pch_pll_mgr;
3815 
3816 	if (!dpll_mgr) {
3817 		dev_priv->num_shared_dpll = 0;
3818 		return;
3819 	}
3820 
3821 	dpll_info = dpll_mgr->dpll_info;
3822 
3823 	for (i = 0; dpll_info[i].name; i++) {
3824 		drm_WARN_ON(dev, i != dpll_info[i].id);
3825 		dev_priv->shared_dplls[i].info = &dpll_info[i];
3826 	}
3827 
3828 	dev_priv->dpll_mgr = dpll_mgr;
3829 	dev_priv->num_shared_dpll = i;
3830 	mutex_init(&dev_priv->dpll_lock);
3831 
3832 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3833 }
3834 
3835 /**
3836  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3837  * @state: atomic state
3838  * @crtc: CRTC to reserve DPLLs for
3839  * @encoder: encoder
3840  *
3841  * This function reserves all required DPLLs for the given CRTC and encoder
3842  * combination in the current atomic commit @state and the new @crtc atomic
3843  * state.
3844  *
3845  * The new configuration in the atomic commit @state is made effective by
3846  * calling intel_shared_dpll_swap_state().
3847  *
3848  * The reserved DPLLs should be released by calling
3849  * intel_release_shared_dplls().
3850  *
3851  * Returns:
3852  * True if all required DPLLs were successfully reserved.
3853  */
3854 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3855 				struct intel_crtc *crtc,
3856 				struct intel_encoder *encoder)
3857 {
3858 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3859 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3860 
3861 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
3862 		return false;
3863 
3864 	return dpll_mgr->get_dplls(state, crtc, encoder);
3865 }
3866 
3867 /**
3868  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3869  * @state: atomic state
3870  * @crtc: crtc from which the DPLLs are to be released
3871  *
3872  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3873  * from the current atomic commit @state and the old @crtc atomic state.
3874  *
3875  * The new configuration in the atomic commit @state is made effective by
3876  * calling intel_shared_dpll_swap_state().
3877  */
3878 void intel_release_shared_dplls(struct intel_atomic_state *state,
3879 				struct intel_crtc *crtc)
3880 {
3881 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3882 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3883 
3884 	/*
3885 	 * FIXME: this function is called for every platform having a
3886 	 * compute_clock hook, even though the platform doesn't yet support
3887 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
3888 	 * called on those.
3889 	 */
3890 	if (!dpll_mgr)
3891 		return;
3892 
3893 	dpll_mgr->put_dplls(state, crtc);
3894 }
3895 
3896 /**
3897  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3898  * @state: atomic state
3899  * @crtc: the CRTC for which to update the active DPLL
3900  * @encoder: encoder determining the type of port DPLL
3901  *
3902  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3903  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3904  * DPLL selected will be based on the current mode of the encoder's port.
3905  */
3906 void intel_update_active_dpll(struct intel_atomic_state *state,
3907 			      struct intel_crtc *crtc,
3908 			      struct intel_encoder *encoder)
3909 {
3910 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3911 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3912 
3913 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
3914 		return;
3915 
3916 	dpll_mgr->update_active_dpll(state, crtc, encoder);
3917 }
3918 
3919 /**
3920  * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
3921  * @dev_priv: i915 drm device
3922  * @hw_state: hw state to be written to the log
3923  *
3924  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
3925  */
3926 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3927 			      const struct intel_dpll_hw_state *hw_state)
3928 {
3929 	if (dev_priv->dpll_mgr) {
3930 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3931 	} else {
3932 		/* fallback for platforms that don't use the shared dpll
3933 		 * infrastructure
3934 		 */
3935 		drm_dbg_kms(&dev_priv->drm,
3936 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3937 			    "fp0: 0x%x, fp1: 0x%x\n",
3938 			    hw_state->dpll,
3939 			    hw_state->dpll_md,
3940 			    hw_state->fp0,
3941 			    hw_state->fp1);
3942 	}
3943 }
3944