1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
47 
/*
 * struct intel_dpll_mgr - per-platform shared DPLL manager vtable
 *
 * Bundles the platform's DPLL table with the hooks the shared-DPLL
 * framework uses to reserve, release and describe PLLs. Not every
 * platform sets every hook (e.g. pch_pll_mgr below leaves
 * update_active_dpll and update_ref_clks unset).
 */
struct intel_dpll_mgr {
	/* table describing the platform's DPLLs */
	const struct dpll_info *dpll_info;

	/* reserve the DPLL(s) needed by @crtc/@encoder in the atomic state */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the DPLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* switch @crtc/@encoder to a different already-reserved DPLL */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh the cached DPLL reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log the contents of a DPLL hw state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
63 
/*
 * Snapshot the current software state of every shared DPLL into
 * @shared_dpll, seeding the atomic state so it can be modified and later
 * committed via intel_shared_dpll_swap_state().
 */
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll_state *shared_dpll)
{
	enum intel_dpll_id i;

	/* Copy shared dpll state */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];

		shared_dpll[i] = pll->state;
	}
}
77 
/*
 * Return the atomic state's shared DPLL state array, lazily duplicating
 * the device's current DPLL bookkeeping into it on first use.
 *
 * Callers must hold connection_mutex (enforced by the WARN below), since
 * the duplicated state is protected by the modeset locks.
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	/* Duplicate only once per atomic state; later calls reuse the copy. */
	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
94 
95 /**
96  * intel_get_shared_dpll_by_id - get a DPLL given its id
97  * @dev_priv: i915 device instance
98  * @id: pll id
99  *
100  * Returns:
101  * A pointer to the DPLL with @id
102  */
103 struct intel_shared_dpll *
104 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
105 			    enum intel_dpll_id id)
106 {
107 	return &dev_priv->dpll.shared_dplls[id];
108 }
109 
110 /**
111  * intel_get_shared_dpll_id - get the id of a DPLL
112  * @dev_priv: i915 device instance
113  * @pll: the DPLL
114  *
115  * Returns:
116  * The id of @pll
117  */
118 enum intel_dpll_id
119 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
120 			 struct intel_shared_dpll *pll)
121 {
122 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
123 
124 	if (drm_WARN_ON(&dev_priv->drm,
125 			pll_idx < 0 ||
126 			pll_idx >= dev_priv->dpll.num_shared_dpll))
127 		return -1;
128 
129 	return pll_idx;
130 }
131 
/* For ILK+ */
/*
 * assert_shared_dpll - check that a PLL's hardware state matches @state
 * @dev_priv: i915 device instance
 * @pll: the DPLL to check; warns and bails if NULL
 * @state: expected on/off state
 *
 * Reads the hardware back through the PLL's get_hw_state() hook and
 * emits a state warning if it disagrees with the expectation.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
149 
150 static i915_reg_t
151 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
152 			   struct intel_shared_dpll *pll)
153 {
154 
155 	if (IS_ELKHARTLAKE(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
156 		return MG_PLL_ENABLE(0);
157 
158 	return CNL_DPLL_ENABLE(pll->info->id);
159 
160 
161 }
162 /**
163  * intel_prepare_shared_dpll - call a dpll's prepare hook
164  * @crtc_state: CRTC, and its state, which has a shared dpll
165  *
166  * This calls the PLL's prepare hook if it has one and if the PLL is not
167  * already enabled. The prepare hook is platform specific.
168  */
169 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
170 {
171 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
172 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
173 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
174 
175 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
176 		return;
177 
178 	mutex_lock(&dev_priv->dpll.lock);
179 	drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
180 	if (!pll->active_mask) {
181 		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
182 		drm_WARN_ON(&dev_priv->drm, pll->on);
183 		assert_shared_dpll_disabled(dev_priv, pll);
184 
185 		pll->info->funcs->prepare(dev_priv, pll);
186 	}
187 	mutex_unlock(&dev_priv->dpll.lock);
188 }
189 
190 /**
191  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
192  * @crtc_state: CRTC, and its state, which has a shared DPLL
193  *
194  * Enable the shared DPLL used by @crtc.
195  */
196 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
197 {
198 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
199 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
200 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
201 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
202 	unsigned int old_mask;
203 
204 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
205 		return;
206 
207 	mutex_lock(&dev_priv->dpll.lock);
208 	old_mask = pll->active_mask;
209 
210 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
211 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
212 		goto out;
213 
214 	pll->active_mask |= crtc_mask;
215 
216 	drm_dbg_kms(&dev_priv->drm,
217 		    "enable %s (active %x, on? %d) for crtc %d\n",
218 		    pll->info->name, pll->active_mask, pll->on,
219 		    crtc->base.base.id);
220 
221 	if (old_mask) {
222 		drm_WARN_ON(&dev_priv->drm, !pll->on);
223 		assert_shared_dpll_enabled(dev_priv, pll);
224 		goto out;
225 	}
226 	drm_WARN_ON(&dev_priv->drm, pll->on);
227 
228 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
229 	pll->info->funcs->enable(dev_priv, pll);
230 	pll->on = true;
231 
232 out:
233 	mutex_unlock(&dev_priv->dpll.lock);
234 }
235 
236 /**
237  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
238  * @crtc_state: CRTC, and its state, which has a shared DPLL
239  *
240  * Disable the shared DPLL used by @crtc.
241  */
242 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
243 {
244 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
245 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
246 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
247 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
248 
249 	/* PCH only available on ILK+ */
250 	if (INTEL_GEN(dev_priv) < 5)
251 		return;
252 
253 	if (pll == NULL)
254 		return;
255 
256 	mutex_lock(&dev_priv->dpll.lock);
257 	if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
258 		goto out;
259 
260 	drm_dbg_kms(&dev_priv->drm,
261 		    "disable %s (active %x, on? %d) for crtc %d\n",
262 		    pll->info->name, pll->active_mask, pll->on,
263 		    crtc->base.base.id);
264 
265 	assert_shared_dpll_enabled(dev_priv, pll);
266 	drm_WARN_ON(&dev_priv->drm, !pll->on);
267 
268 	pll->active_mask &= ~crtc_mask;
269 	if (pll->active_mask)
270 		goto out;
271 
272 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
273 	pll->info->funcs->disable(dev_priv, pll);
274 	pll->on = false;
275 
276 out:
277 	mutex_unlock(&dev_priv->dpll.lock);
278 }
279 
/*
 * Find a shared DPLL out of @dpll_mask usable for @crtc with @pll_state:
 * prefer a PLL whose staged hw state is bit-identical to @pll_state (so
 * it can be shared), otherwise fall back to the first completely unused
 * PLL in the mask. Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].crtc_mask == 0) {
			/* Remember the first unused PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Exact hw state match means this PLL can be shared. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].crtc_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
328 
/*
 * Record @crtc as a user of @pll in the atomic DPLL state, installing
 * @pll_state as the PLL's hw state if this is the first reference.
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* First user installs the hw state; later users must match it. */
	if (shared_dpll[id].crtc_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
		pipe_name(crtc->pipe));

	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
349 
350 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
351 					  const struct intel_crtc *crtc,
352 					  const struct intel_shared_dpll *pll)
353 {
354 	struct intel_shared_dpll_state *shared_dpll;
355 
356 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
357 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
358 }
359 
360 static void intel_put_dpll(struct intel_atomic_state *state,
361 			   struct intel_crtc *crtc)
362 {
363 	const struct intel_crtc_state *old_crtc_state =
364 		intel_atomic_get_old_crtc_state(state, crtc);
365 	struct intel_crtc_state *new_crtc_state =
366 		intel_atomic_get_new_crtc_state(state, crtc);
367 
368 	new_crtc_state->shared_dpll = NULL;
369 
370 	if (!old_crtc_state->shared_dpll)
371 		return;
372 
373 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
374 }
375 
376 /**
377  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
378  * @state: atomic state
379  *
380  * This is the dpll version of drm_atomic_helper_swap_state() since the
381  * helper does not handle driver-specific global state.
382  *
383  * For consistency with atomic helpers this function does a complete swap,
384  * i.e. it also puts the current state into @state, even though there is no
385  * need for that at this moment.
386  */
387 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
388 {
389 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
390 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
391 	enum intel_dpll_id i;
392 
393 	if (!state->dpll_set)
394 		return;
395 
396 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
397 		struct intel_shared_dpll *pll =
398 			&dev_priv->dpll.shared_dplls[i];
399 
400 		swap(pll->state, shared_dpll[i]);
401 	}
402 }
403 
/*
 * Read back the PCH DPLL hardware state (DPLL control plus both FP
 * divider registers) into @hw_state.
 *
 * Returns true iff the PLL's VCO enable bit is set. Returns false
 * without touching the hardware if the display core power domain cannot
 * be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
426 
/* Program the cached FP0/FP1 divisor values ahead of enabling the DPLL. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
435 
/*
 * Warn if the PCH reference clock control has no source enabled. Only
 * meaningful on IBX/CPT PCHs, which is asserted up front.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	/* Any of the three source fields being non-zero counts as enabled. */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
448 
/*
 * Enable the PCH DPLL: verify the reference clock, write the cached
 * control value, and rewrite it after the clocks stabilize so the pixel
 * multiplier takes effect (see comment below). The udelay()s between
 * steps are required settle times.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
472 
/* Disable the PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* Settle delay, mirroring the enable path's final wait. */
	udelay(200);
}
482 
483 static bool ibx_get_dpll(struct intel_atomic_state *state,
484 			 struct intel_crtc *crtc,
485 			 struct intel_encoder *encoder)
486 {
487 	struct intel_crtc_state *crtc_state =
488 		intel_atomic_get_new_crtc_state(state, crtc);
489 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
490 	struct intel_shared_dpll *pll;
491 	enum intel_dpll_id i;
492 
493 	if (HAS_PCH_IBX(dev_priv)) {
494 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
495 		i = (enum intel_dpll_id) crtc->pipe;
496 		pll = &dev_priv->dpll.shared_dplls[i];
497 
498 		drm_dbg_kms(&dev_priv->drm,
499 			    "[CRTC:%d:%s] using pre-allocated %s\n",
500 			    crtc->base.base.id, crtc->base.name,
501 			    pll->info->name);
502 	} else {
503 		pll = intel_find_shared_dpll(state, crtc,
504 					     &crtc_state->dpll_hw_state,
505 					     BIT(DPLL_ID_PCH_PLL_B) |
506 					     BIT(DPLL_ID_PCH_PLL_A));
507 	}
508 
509 	if (!pll)
510 		return false;
511 
512 	/* reference the pll */
513 	intel_reference_shared_dpll(state, crtc,
514 				    pll, &crtc_state->dpll_hw_state);
515 
516 	crtc_state->shared_dpll = pll;
517 
518 	return true;
519 }
520 
/* Dump the PCH DPLL hw state fields to the kms debug log. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
532 
/* Control hooks for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
539 
/* The two PCH DPLLs; the table is terminated by an empty sentinel entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
545 
/* DPLL manager for PCH platforms; ref-clk/active-dpll hooks unused here. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
552 
553 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
554 			       struct intel_shared_dpll *pll)
555 {
556 	const enum intel_dpll_id id = pll->info->id;
557 
558 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
559 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
560 	udelay(20);
561 }
562 
/*
 * Enable the HSW/BDW SPLL by writing back its cached control value, then
 * wait a fixed 20us for it to settle (no readiness poll in this path).
 */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
570 
/*
 * Disable a WRPLL by clearing its enable bit, then give the PCH refclk
 * logic a chance to reconfigure if this PLL was using the SSC reference.
 */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
588 
589 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
590 				 struct intel_shared_dpll *pll)
591 {
592 	enum intel_dpll_id id = pll->info->id;
593 	u32 val;
594 
595 	val = intel_de_read(dev_priv, SPLL_CTL);
596 	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
597 	intel_de_posting_read(dev_priv, SPLL_CTL);
598 
599 	/*
600 	 * Try to set up the PCH reference clock once all DPLLs
601 	 * that depend on it have been shut down.
602 	 */
603 	if (dev_priv->pch_ssc_use & BIT(id))
604 		intel_init_pch_refclk(dev_priv);
605 }
606 
/*
 * Read back a WRPLL's control register into @hw_state. Returns true iff
 * the PLL enable bit is set; returns false without touching the hardware
 * if the display core power domain cannot be acquired.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
627 
/*
 * Read back the SPLL control register into @hw_state. Returns true iff
 * the PLL enable bit is set; returns false without touching the hardware
 * if the display core power domain cannot be acquired.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
647 
/* LC PLL reference frequency, in MHz. */
#define LC_FREQ 2700
/* LC_FREQ scaled into the fixed-point units used by the WRPLL search. */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range: even values from 2 to 64. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior: reference and VCO ranges in MHz. */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate divider triple: P, plus doubled N and R (n2 = 2*N, r2 = 2*R). */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
664 
/*
 * Return the PPM error budget for the WRPLL divider search at @clock
 * (in Hz). Well-known standard clocks get per-frequency budgets; any
 * clock not listed falls back to the 1000 PPM default.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		unsigned int count;
		unsigned int budget;
	} groups[] = {
		{ budget_0, sizeof(budget_0) / sizeof(budget_0[0]), 0 },
		{ budget_1500, sizeof(budget_1500) / sizeof(budget_1500[0]), 1500 },
		{ budget_2000, sizeof(budget_2000) / sizeof(budget_2000[0]), 2000 },
		{ budget_4000, sizeof(budget_4000) / sizeof(budget_4000[0]), 4000 },
		{ budget_5000, sizeof(budget_5000) / sizeof(budget_5000[0]), 5000 },
	};
	unsigned int g, i;

	for (g = 0; g < sizeof(groups) / sizeof(groups[0]); g++) {
		for (i = 0; i < groups[g].count; i++) {
			if (groups[g].clocks[i] == clock)
				return groups[g].budget;
		}
	}

	/* Anything not listed above gets the default budget. */
	return 1000;
}
738 
/*
 * Consider divider candidate (r2, n2, p) for target @freq2k (scaled by
 * the caller, see hsw_ddi_calculate_wrpll()) and update @best in place
 * if the candidate is preferable. Selection rules: a candidate within
 * the PPM @budget beats one outside it; of two out-of-budget candidates
 * the closer one wins; of two in-budget candidates the one maximizing
 * Ref * VCO, i.e. n2 / (r2 * r2), wins.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * All comparisons below are cross-multiplied to avoid division:
	 * a/b hold budget * denominator terms, c/d the scaled errors of the
	 * candidate and the current best respectively.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
798 
/*
 * Exhaustively search (r2, n2, p) WRPLL divider triples producing @clock,
 * subject to the REF_MIN/REF_MAX and VCO_MIN/VCO_MAX constraints, keeping
 * the best candidate per hsw_wrpll_update_rnp(). Results come back via
 * the out parameters, with n2 = 2*N and r2 = 2*R.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
863 
864 static struct intel_shared_dpll *
865 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
866 		       struct intel_crtc *crtc)
867 {
868 	struct intel_crtc_state *crtc_state =
869 		intel_atomic_get_new_crtc_state(state, crtc);
870 	struct intel_shared_dpll *pll;
871 	u32 val;
872 	unsigned int p, n2, r2;
873 
874 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
875 
876 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
877 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
878 	      WRPLL_DIVIDER_POST(p);
879 
880 	crtc_state->dpll_hw_state.wrpll = val;
881 
882 	pll = intel_find_shared_dpll(state, crtc,
883 				     &crtc_state->dpll_hw_state,
884 				     BIT(DPLL_ID_WRPLL2) |
885 				     BIT(DPLL_ID_WRPLL1));
886 
887 	if (!pll)
888 		return NULL;
889 
890 	return pll;
891 }
892 
/*
 * Compute a WRPLL's output frequency in kHz from its cached control
 * value: decode the reference clock source, extract the R/P/N divider
 * fields, then apply the PLL equation.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll->state.hw_state.wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
931 
932 static struct intel_shared_dpll *
933 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
934 {
935 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
936 	struct intel_shared_dpll *pll;
937 	enum intel_dpll_id pll_id;
938 	int clock = crtc_state->port_clock;
939 
940 	switch (clock / 2) {
941 	case 81000:
942 		pll_id = DPLL_ID_LCPLL_810;
943 		break;
944 	case 135000:
945 		pll_id = DPLL_ID_LCPLL_1350;
946 		break;
947 	case 270000:
948 		pll_id = DPLL_ID_LCPLL_2700;
949 		break;
950 	default:
951 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
952 			    clock);
953 		return NULL;
954 	}
955 
956 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
957 
958 	if (!pll)
959 		return NULL;
960 
961 	return pll;
962 }
963 
/*
 * Return the port clock (2x the fixed link clock, in kHz) for an LCPLL,
 * selected by PLL id. Warns and returns 0 for an unexpected id.
 */
static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	int link_clock = 0;

	switch (pll->info->id) {
	case DPLL_ID_LCPLL_810:
		link_clock = 81000;
		break;
	case DPLL_ID_LCPLL_1350:
		link_clock = 135000;
		break;
	case DPLL_ID_LCPLL_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
		break;
	}

	return link_clock * 2;
}
986 
/*
 * Pick the SPLL for an analog output. Only a 270 MHz port clock
 * (1350 MHz SPLL frequency setting) is supported here; anything else
 * warns and fails.
 */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return NULL;

	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
					 SPLL_REF_MUXED_SSC;

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
1003 
/*
 * Decode the frequency-select field from the SPLL's cached control value
 * and return the resulting port clock (2x link clock, in kHz). Warns and
 * returns 0 for an unexpected field value.
 */
static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
				 const struct intel_shared_dpll *pll)
{
	int link_clock = 0;

	switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
	case SPLL_FREQ_810MHz:
		link_clock = 81000;
		break;
	case SPLL_FREQ_1350MHz:
		link_clock = 135000;
		break;
	case SPLL_FREQ_2700MHz:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "bad spll freq\n");
		break;
	}

	return link_clock * 2;
}
1026 
1027 static bool hsw_get_dpll(struct intel_atomic_state *state,
1028 			 struct intel_crtc *crtc,
1029 			 struct intel_encoder *encoder)
1030 {
1031 	struct intel_crtc_state *crtc_state =
1032 		intel_atomic_get_new_crtc_state(state, crtc);
1033 	struct intel_shared_dpll *pll;
1034 
1035 	memset(&crtc_state->dpll_hw_state, 0,
1036 	       sizeof(crtc_state->dpll_hw_state));
1037 
1038 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1039 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1040 	else if (intel_crtc_has_dp_encoder(crtc_state))
1041 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1042 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1043 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1044 	else
1045 		return false;
1046 
1047 	if (!pll)
1048 		return false;
1049 
1050 	intel_reference_shared_dpll(state, crtc,
1051 				    pll, &crtc_state->dpll_hw_state);
1052 
1053 	crtc_state->shared_dpll = pll;
1054 
1055 	return true;
1056 }
1057 
1058 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1059 {
1060 	i915->dpll.ref_clks.ssc = 135000;
1061 	/* Non-SSC is only used on non-ULT HSW. */
1062 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1063 		i915->dpll.ref_clks.nssc = 24000;
1064 	else
1065 		i915->dpll.ref_clks.nssc = 135000;
1066 }
1067 
/* Log the HSW PLL hardware state (WRPLL + SPLL control words) for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1074 
/* WRPLL ops: programmable PLLs used for HDMI outputs (see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1081 
/* SPLL ops: fixed-frequency PLL used for analog output (see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1088 
/*
 * The HSW LCPLLs are registered with INTEL_DPLL_ALWAYS_ON (see hsw_plls[]),
 * so enabling is intentionally a no-op.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1093 
/* Counterpart of hsw_ddi_lcpll_enable(): always-on PLL, nothing to disable. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1098 
/*
 * LCPLLs are always on and have no per-PLL state to read back, so readout
 * unconditionally reports the PLL as enabled.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1105 
/* LCPLL ops: fixed, always-on PLLs used for DP link rates. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1112 
/*
 * Haswell PLL pool: two programmable WRPLLs, one SPLL, and three
 * fixed-frequency LCPLL variants that are always on.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },	/* sentinel */
};
1122 
/* Haswell shared-DPLL manager vtable. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1130 
/* Per-PLL control and divider-configuration register addresses for SKL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1134 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
		/* cfgcr1/cfgcr2 intentionally left zero-initialized */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1161 
1162 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1163 				    struct intel_shared_dpll *pll)
1164 {
1165 	const enum intel_dpll_id id = pll->info->id;
1166 	u32 val;
1167 
1168 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1169 
1170 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1171 		 DPLL_CTRL1_SSC(id) |
1172 		 DPLL_CTRL1_LINK_RATE_MASK(id));
1173 	val |= pll->state.hw_state.ctrl1 << (id * 6);
1174 
1175 	intel_de_write(dev_priv, DPLL_CTRL1, val);
1176 	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1177 }
1178 
/*
 * Enable a programmable SKL DPLL (1-3): commit ctrl1 and the cfgcr
 * dividers first, then set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	/* Dividers must be programmed before enabling; posting reads flush. */
	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* Lock failure is logged but not fatal here. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1199 
/*
 * DPLL0 is always enabled since it drives CDCLK (see
 * skl_ddi_dpll0_get_hw_state()); only its DPLL_CTRL1 link-rate field
 * needs programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1205 
/* Disable a programmable SKL DPLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1217 
/*
 * Intentional no-op: DPLL0 must stay enabled since it drives CDCLK
 * (see skl_ddi_dpll0_get_hw_state()).
 */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1222 
/*
 * Read back the state of a programmable SKL DPLL into @hw_state.
 * Returns false if display power is off or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Each PLL owns a 6-bit field in DPLL_CTRL1. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1259 
/*
 * Read back DPLL0 state. Unlike skl_ddi_pll_get_hw_state(), a disabled
 * PLL here is a driver bug (DPLL0 must always run), hence the WARN.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* DPLL0 has no cfgcr registers; only its DPLL_CTRL1 field matters. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1292 
/* Best-candidate search state for skl_ddi_calculate_wrpll(). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1299 
1300 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1301 {
1302 	memset(ctx, 0, sizeof(*ctx));
1303 
1304 	ctx->min_deviation = U64_MAX;
1305 }
1306 
1307 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1308 #define SKL_DCO_MAX_PDEVIATION	100
1309 #define SKL_DCO_MAX_NDEVIATION	600
1310 
1311 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1312 				  u64 central_freq,
1313 				  u64 dco_freq,
1314 				  unsigned int divider)
1315 {
1316 	u64 deviation;
1317 
1318 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1319 			      central_freq);
1320 
1321 	/* positive deviation */
1322 	if (dco_freq >= central_freq) {
1323 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1324 		    deviation < ctx->min_deviation) {
1325 			ctx->min_deviation = deviation;
1326 			ctx->central_freq = central_freq;
1327 			ctx->dco_freq = dco_freq;
1328 			ctx->p = divider;
1329 		}
1330 	/* negative deviation */
1331 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1332 		   deviation < ctx->min_deviation) {
1333 		ctx->min_deviation = deviation;
1334 		ctx->central_freq = central_freq;
1335 		ctx->dco_freq = dco_freq;
1336 		ctx->p = divider;
1337 	}
1338 }
1339 
/*
 * Split the overall divider p into the three hardware factors
 * p0 (PDiv), p1 (QDiv ratio) and p2 (KDiv) with p == p0 * p1 * p2.
 * Unsupported values of p leave the outputs untouched; callers
 * pre-initialize them to 0 and warn on that case.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* Small halves go straight into KDiv. */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* Otherwise factor the half through PDiv/QDiv with KDiv = 2. */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* Odd dividers: 3, 5, 7, 9, 15, 21, 35. */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1388 
/* Register-field encodings for DPLL_CFGCR1/CFGCR2 (see skl_wrpll_params_populate()). */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;	/* 1 iff qdiv_ratio != 1 */
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1398 
1399 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1400 				      u64 afe_clock,
1401 				      int ref_clock,
1402 				      u64 central_freq,
1403 				      u32 p0, u32 p1, u32 p2)
1404 {
1405 	u64 dco_freq;
1406 
1407 	switch (central_freq) {
1408 	case 9600000000ULL:
1409 		params->central_freq = 0;
1410 		break;
1411 	case 9000000000ULL:
1412 		params->central_freq = 1;
1413 		break;
1414 	case 8400000000ULL:
1415 		params->central_freq = 3;
1416 	}
1417 
1418 	switch (p0) {
1419 	case 1:
1420 		params->pdiv = 0;
1421 		break;
1422 	case 2:
1423 		params->pdiv = 1;
1424 		break;
1425 	case 3:
1426 		params->pdiv = 2;
1427 		break;
1428 	case 7:
1429 		params->pdiv = 4;
1430 		break;
1431 	default:
1432 		WARN(1, "Incorrect PDiv\n");
1433 	}
1434 
1435 	switch (p2) {
1436 	case 5:
1437 		params->kdiv = 0;
1438 		break;
1439 	case 2:
1440 		params->kdiv = 1;
1441 		break;
1442 	case 3:
1443 		params->kdiv = 2;
1444 		break;
1445 	case 1:
1446 		params->kdiv = 3;
1447 		break;
1448 	default:
1449 		WARN(1, "Incorrect KDiv\n");
1450 	}
1451 
1452 	params->qdiv_ratio = p1;
1453 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1454 
1455 	dco_freq = p0 * p1 * p2 * afe_clock;
1456 
1457 	/*
1458 	 * Intermediate values are in Hz.
1459 	 * Divide by MHz to match bsepc
1460 	 */
1461 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1462 	params->dco_fraction =
1463 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1464 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1465 }
1466 
/*
 * Find WRPLL dividers for @clock: try every allowed divider against each
 * candidate DCO central frequency and keep the combination with the
 * smallest deviation, preferring even dividers over odd ones. On success
 * the chosen configuration is encoded into @wrpll_params.
 *
 * @clock: port clock in Hz
 * @ref_clock: PLL reference clock -- presumably kHz, matching
 *             dpll.ref_clks.nssc (see skl_ddi_hdmi_pll_dividers())
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	/* d == 0 iterates the even dividers, d == 1 the odd ones. */
	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1540 
1541 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1542 {
1543 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1544 	u32 ctrl1, cfgcr1, cfgcr2;
1545 	struct skl_wrpll_params wrpll_params = { 0, };
1546 
1547 	/*
1548 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1549 	 * as the DPLL id in this function.
1550 	 */
1551 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1552 
1553 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1554 
1555 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1556 				     i915->dpll.ref_clks.nssc,
1557 				     &wrpll_params))
1558 		return false;
1559 
1560 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1561 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1562 		wrpll_params.dco_integer;
1563 
1564 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1565 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1566 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1567 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1568 		wrpll_params.central_freq;
1569 
1570 	memset(&crtc_state->dpll_hw_state, 0,
1571 	       sizeof(crtc_state->dpll_hw_state));
1572 
1573 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1574 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1575 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1576 	return true;
1577 }
1578 
/*
 * Inverse of skl_wrpll_params_populate(): decode cfgcr1/cfgcr2 back into
 * the divider factors and DCO frequency and return the port clock.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* The QDiv ratio only applies when QDiv mode is enabled. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	/* Decode the PDiv field into the actual divider value. */
	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	}

	/* Decode the KDiv field into the actual divider value. */
	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	}

	/* DCO = integer part plus fraction in ref_clock/0x8000 units. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	/* Guard against an unprogrammed PLL before dividing. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is 5x the pixel clock, hence the extra factor of 5. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1636 
1637 static bool
1638 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1639 {
1640 	u32 ctrl1;
1641 
1642 	/*
1643 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1644 	 * as the DPLL id in this function.
1645 	 */
1646 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1647 	switch (crtc_state->port_clock / 2) {
1648 	case 81000:
1649 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1650 		break;
1651 	case 135000:
1652 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1653 		break;
1654 	case 270000:
1655 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1656 		break;
1657 		/* eDP 1.4 rates */
1658 	case 162000:
1659 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1660 		break;
1661 	case 108000:
1662 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1663 		break;
1664 	case 216000:
1665 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1666 		break;
1667 	}
1668 
1669 	memset(&crtc_state->dpll_hw_state, 0,
1670 	       sizeof(crtc_state->dpll_hw_state));
1671 
1672 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1673 
1674 	return true;
1675 }
1676 
1677 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1678 				  const struct intel_shared_dpll *pll)
1679 {
1680 	int link_clock = 0;
1681 
1682 	switch ((pll->state.hw_state.ctrl1 &
1683 		 DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1684 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1685 	case DPLL_CTRL1_LINK_RATE_810:
1686 		link_clock = 81000;
1687 		break;
1688 	case DPLL_CTRL1_LINK_RATE_1080:
1689 		link_clock = 108000;
1690 		break;
1691 	case DPLL_CTRL1_LINK_RATE_1350:
1692 		link_clock = 135000;
1693 		break;
1694 	case DPLL_CTRL1_LINK_RATE_1620:
1695 		link_clock = 162000;
1696 		break;
1697 	case DPLL_CTRL1_LINK_RATE_2160:
1698 		link_clock = 216000;
1699 		break;
1700 	case DPLL_CTRL1_LINK_RATE_2700:
1701 		link_clock = 270000;
1702 		break;
1703 	default:
1704 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1705 		break;
1706 	}
1707 
1708 	return link_clock * 2;
1709 }
1710 
1711 static bool skl_get_dpll(struct intel_atomic_state *state,
1712 			 struct intel_crtc *crtc,
1713 			 struct intel_encoder *encoder)
1714 {
1715 	struct intel_crtc_state *crtc_state =
1716 		intel_atomic_get_new_crtc_state(state, crtc);
1717 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1718 	struct intel_shared_dpll *pll;
1719 	bool bret;
1720 
1721 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1722 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1723 		if (!bret) {
1724 			drm_dbg_kms(&i915->drm,
1725 				    "Could not get HDMI pll dividers.\n");
1726 			return false;
1727 		}
1728 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1729 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1730 		if (!bret) {
1731 			drm_dbg_kms(&i915->drm,
1732 				    "Could not set DP dpll HW state.\n");
1733 			return false;
1734 		}
1735 	} else {
1736 		return false;
1737 	}
1738 
1739 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1740 		pll = intel_find_shared_dpll(state, crtc,
1741 					     &crtc_state->dpll_hw_state,
1742 					     BIT(DPLL_ID_SKL_DPLL0));
1743 	else
1744 		pll = intel_find_shared_dpll(state, crtc,
1745 					     &crtc_state->dpll_hw_state,
1746 					     BIT(DPLL_ID_SKL_DPLL3) |
1747 					     BIT(DPLL_ID_SKL_DPLL2) |
1748 					     BIT(DPLL_ID_SKL_DPLL1));
1749 	if (!pll)
1750 		return false;
1751 
1752 	intel_reference_shared_dpll(state, crtc,
1753 				    pll, &crtc_state->dpll_hw_state);
1754 
1755 	crtc_state->shared_dpll = pll;
1756 
1757 	return true;
1758 }
1759 
1760 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1761 				const struct intel_shared_dpll *pll)
1762 {
1763 	/*
1764 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1765 	 * the internal shift for each field
1766 	 */
1767 	if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1768 		return skl_ddi_wrpll_get_freq(i915, pll);
1769 	else
1770 		return skl_ddi_lcpll_get_freq(i915, pll);
1771 }
1772 
/* SKL DPLLs use the CDCLK reference clock; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1778 
/* Log the SKL PLL hardware state (ctrl1 + cfgcr registers) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1788 
/* Ops for the programmable SKL DPLLs 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1795 
/* Ops for the always-on DPLL0 that also drives CDCLK. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1802 
/* Skylake PLL pool: always-on DPLL0 plus three programmable DPLLs. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },	/* sentinel */
};
1810 
/* Skylake shared-DPLL manager vtable. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1818 
/*
 * Program and enable a BXT/GLK port PLL: select the reference, (GLK only)
 * power the PLL up, write the divider/coefficient registers from the cached
 * hw_state, trigger recalibration, then enable and wait for lock.
 * The register write order below is deliberate; do not reorder.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Second write applies the cached 10-bit-clock setting from ebb4. */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		/* GLK: apply DCC delay range to all lanes via the group register. */
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1939 
/*
 * Disable a BXT/GLK port PLL; on GLK also power the PLL down and wait
 * for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1962 
/*
 * Read back the full BXT/GLK port PLL state into @hw_state, masking each
 * register down to the bits the driver programs. Returns false if display
 * power is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2042 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* port clock in kHz; lookup key for the DP table */
	u32 p1;			/* P1 divider */
	u32 p2;			/* P2 divider */
	u32 m2_int;		/* integer part of M2 (upper bits of dpll.m2) */
	u32 m2_frac;		/* fractional part of M2 (low 22 bits of dpll.m2) */
	bool m2_frac_en;	/* program the M2 fraction (set when m2_frac != 0) */
	u32 n;			/* N divider */

	int vco;		/* resulting VCO frequency in kHz */
};
2055 
/* pre-calculated values for DP linkrates */
/* { clock, p1, p2, m2_int, m2_frac, m2_frac_en, n }; vco is computed at lookup time */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2066 
2067 static bool
2068 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2069 			  struct bxt_clk_div *clk_div)
2070 {
2071 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2072 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2073 	struct dpll best_clock;
2074 
2075 	/* Calculate HDMI div */
2076 	/*
2077 	 * FIXME: tie the following calculation into
2078 	 * i9xx_crtc_compute_clock
2079 	 */
2080 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2081 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2082 			crtc_state->port_clock,
2083 			pipe_name(crtc->pipe));
2084 		return false;
2085 	}
2086 
2087 	clk_div->p1 = best_clock.p1;
2088 	clk_div->p2 = best_clock.p2;
2089 	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2090 	clk_div->n = best_clock.n;
2091 	clk_div->m2_int = best_clock.m2 >> 22;
2092 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2093 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2094 
2095 	clk_div->vco = best_clock.vco;
2096 
2097 	return true;
2098 }
2099 
2100 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2101 				    struct bxt_clk_div *clk_div)
2102 {
2103 	int clock = crtc_state->port_clock;
2104 	int i;
2105 
2106 	*clk_div = bxt_dp_clk_val[0];
2107 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2108 		if (bxt_dp_clk_val[i].clock == clock) {
2109 			*clk_div = bxt_dp_clk_val[i];
2110 			break;
2111 		}
2112 	}
2113 
2114 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2115 }
2116 
/*
 * Translate the computed dividers into BXT port PLL register values in
 * crtc_state->dpll_hw_state. Returns false if the VCO frequency falls
 * outside the ranges covered by the coefficient table below.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Pick per-VCO-range coefficient values for pll6/pll8. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger value is keyed off the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* Assemble the register values read back by bxt_ddi_pll_get_hw_state(). */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2186 
2187 static bool
2188 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2189 {
2190 	struct bxt_clk_div clk_div = {};
2191 
2192 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2193 
2194 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2195 }
2196 
2197 static bool
2198 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2199 {
2200 	struct bxt_clk_div clk_div = {};
2201 
2202 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2203 
2204 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2205 }
2206 
/*
 * Reconstruct the port clock (kHz) from the PLL register state saved in
 * pll->state.hw_state, by decoding the divider fields and feeding them
 * to the common CHV divider math.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	struct dpll clock;

	clock.m1 = 2;
	/* integer part of M2 lives in pll0; the fraction (if enabled) in pll2 */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2223 
/*
 * Compute the PLL state for the CRTC's output type and reserve the
 * port's dedicated PLL. Returns false if the dividers/state cannot be
 * computed.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	/* NOTE(review): result is not NULL-checked; relies on the fixed
	 * port->PLL mapping always resolving — confirm. */
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2256 
/* BXT uses a fixed 100 MHz reference for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2263 
2264 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2265 			      const struct intel_dpll_hw_state *hw_state)
2266 {
2267 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2268 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2269 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2270 		    hw_state->ebb0,
2271 		    hw_state->ebb4,
2272 		    hw_state->pll0,
2273 		    hw_state->pll1,
2274 		    hw_state->pll2,
2275 		    hw_state->pll3,
2276 		    hw_state->pll6,
2277 		    hw_state->pll8,
2278 		    hw_state->pll9,
2279 		    hw_state->pll10,
2280 		    hw_state->pcsdw12);
2281 }
2282 
/* Hooks shared by all three BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2289 
/* One dedicated PLL per port; the list is terminated by an empty entry. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2296 
/* BXT DPLL manager: platform entry point for the shared-DPLL framework. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2304 
/* CNL DPLL enable; the numbered comments mirror the documented sequence. */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* Configure DPLL_CFGCR1 */
	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2372 
/* CNL DPLL disable; reverse of cnl_ddi_pll_enable(), same numbered steps. */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2421 
/*
 * Read the current CNL DPLL configuration into hw_state. Returns true
 * only if the PLL is enabled; needs the display-core power domain, so it
 * bails out early if that power well is down.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2457 
/*
 * Split the overall divider into P/Q/K factors. The even-divider rules are
 * checked in the same order as before, since e.g. 20 must resolve via the
 * "divisible by 4" rule rather than the "divisible by 5" one.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	static const struct {
		int mod;	/* rule applies when bestdiv % mod == 0 */
		int p;		/* resulting P divider */
		int div;	/* Q divider is bestdiv / div */
		int k;		/* resulting K divider */
	} even_rules[] = {
		{ 4, 2, 4, 2 },
		{ 6, 3, 6, 2 },
		{ 5, 5, 10, 2 },
		{ 14, 7, 14, 2 },
	};
	unsigned int i;

	if (bestdiv % 2 != 0) {
		/* odd dividers: 3, 5 and 7 map directly; 9, 15 and 21 use K=3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
		return;
	}

	for (i = 0; i < sizeof(even_rules) / sizeof(even_rules[0]); i++) {
		if (bestdiv % even_rules[i].mod == 0) {
			*pdiv = even_rules[i].p;
			*qdiv = bestdiv / even_rules[i].div;
			*kdiv = even_rules[i].k;
			return;
		}
	}
	/* unmatched even dividers leave the outputs untouched, as before */
}
2496 
2497 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2498 				      u32 dco_freq, u32 ref_freq,
2499 				      int pdiv, int qdiv, int kdiv)
2500 {
2501 	u32 dco;
2502 
2503 	switch (kdiv) {
2504 	case 1:
2505 		params->kdiv = 1;
2506 		break;
2507 	case 2:
2508 		params->kdiv = 2;
2509 		break;
2510 	case 3:
2511 		params->kdiv = 4;
2512 		break;
2513 	default:
2514 		WARN(1, "Incorrect KDiv\n");
2515 	}
2516 
2517 	switch (pdiv) {
2518 	case 2:
2519 		params->pdiv = 1;
2520 		break;
2521 	case 3:
2522 		params->pdiv = 2;
2523 		break;
2524 	case 5:
2525 		params->pdiv = 4;
2526 		break;
2527 	case 7:
2528 		params->pdiv = 8;
2529 		break;
2530 	default:
2531 		WARN(1, "Incorrect PDiv\n");
2532 	}
2533 
2534 	WARN_ON(kdiv != 2 && qdiv != 1);
2535 
2536 	params->qdiv_ratio = qdiv;
2537 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2538 
2539 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2540 
2541 	params->dco_integer = dco >> 15;
2542 	params->dco_fraction = dco & 0x7fff;
2543 }
2544 
2545 static bool
2546 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2547 			  struct skl_wrpll_params *wrpll_params,
2548 			  int ref_clock)
2549 {
2550 	u32 afe_clock = crtc_state->port_clock * 5;
2551 	u32 dco_min = 7998000;
2552 	u32 dco_max = 10000000;
2553 	u32 dco_mid = (dco_min + dco_max) / 2;
2554 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2555 					 18, 20, 24, 28, 30, 32,  36,  40,
2556 					 42, 44, 48, 50, 52, 54,  56,  60,
2557 					 64, 66, 68, 70, 72, 76,  78,  80,
2558 					 84, 88, 90, 92, 96, 98, 100, 102,
2559 					  3,  5,  7,  9, 15, 21 };
2560 	u32 dco, best_dco = 0, dco_centrality = 0;
2561 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2562 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2563 
2564 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2565 		dco = afe_clock * dividers[d];
2566 
2567 		if ((dco <= dco_max) && (dco >= dco_min)) {
2568 			dco_centrality = abs(dco - dco_mid);
2569 
2570 			if (dco_centrality < best_dco_centrality) {
2571 				best_dco_centrality = dco_centrality;
2572 				best_div = dividers[d];
2573 				best_dco = dco;
2574 			}
2575 		}
2576 	}
2577 
2578 	if (best_div == 0)
2579 		return false;
2580 
2581 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2582 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2583 				  pdiv, qdiv, kdiv);
2584 
2585 	return true;
2586 }
2587 
2588 static bool
2589 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2590 			struct skl_wrpll_params *wrpll_params)
2591 {
2592 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2593 
2594 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2595 					 i915->dpll.ref_clks.nssc);
2596 }
2597 
2598 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2599 {
2600 	u32 cfgcr0, cfgcr1;
2601 	struct skl_wrpll_params wrpll_params = { 0, };
2602 
2603 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2604 
2605 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2606 		return false;
2607 
2608 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2609 		wrpll_params.dco_integer;
2610 
2611 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2612 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2613 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2614 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2615 		DPLL_CFGCR1_CENTRAL_FREQ;
2616 
2617 	memset(&crtc_state->dpll_hw_state, 0,
2618 	       sizeof(crtc_state->dpll_hw_state));
2619 
2620 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2621 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2622 	return true;
2623 }
2624 
/*
 * Decode the WRPLL register state back into a port clock (kHz):
 * reconstruct the DCO frequency from the integer/fraction fields and
 * divide by the decoded P/Q/K dividers.
 */
static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				    const struct intel_shared_dpll *pll,
				    int ref_clock)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only applies when qdiv_mode is set */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;


	/*
	 * No default cases: an unrecognized field value leaves p0/p2 at the
	 * raw masked register value; only zero is caught by the WARN below.
	 */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	/* DCO = (integer + fraction/2^15) * ref */
	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		      DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;

	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2680 
/* CNL WRPLL frequency readback, using the non-SSC reference clock. */
static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
}
2686 
/*
 * Program crtc_state->dpll_hw_state for a CNL DP link: SSC plus the
 * link-rate select field. Note there is no default case, so an unlisted
 * port clock leaves the link-rate field at zero; the function still
 * returns true.
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	/* port_clock is twice the link rate in 100 kHz units */
	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2731 
2732 static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
2733 				  const struct intel_shared_dpll *pll)
2734 {
2735 	int link_clock = 0;
2736 
2737 	switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
2738 	case DPLL_CFGCR0_LINK_RATE_810:
2739 		link_clock = 81000;
2740 		break;
2741 	case DPLL_CFGCR0_LINK_RATE_1080:
2742 		link_clock = 108000;
2743 		break;
2744 	case DPLL_CFGCR0_LINK_RATE_1350:
2745 		link_clock = 135000;
2746 		break;
2747 	case DPLL_CFGCR0_LINK_RATE_1620:
2748 		link_clock = 162000;
2749 		break;
2750 	case DPLL_CFGCR0_LINK_RATE_2160:
2751 		link_clock = 216000;
2752 		break;
2753 	case DPLL_CFGCR0_LINK_RATE_2700:
2754 		link_clock = 270000;
2755 		break;
2756 	case DPLL_CFGCR0_LINK_RATE_3240:
2757 		link_clock = 324000;
2758 		break;
2759 	case DPLL_CFGCR0_LINK_RATE_4050:
2760 		link_clock = 405000;
2761 		break;
2762 	default:
2763 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
2764 		break;
2765 	}
2766 
2767 	return link_clock * 2;
2768 }
2769 
/*
 * Compute the PLL state for the CRTC's output type and reserve one of the
 * three shareable CNL DPLLs. Returns false if the state cannot be
 * computed or no PLL is available.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		drm_dbg_kms(&i915->drm,
			    "Skip DPLL setup for output_types 0x%x\n",
			    crtc_state->output_types);
		return false;
	}

	/* any of DPLL0/1/2 may be used, shared with other CRTCs if states match */
	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		drm_dbg_kms(&i915->drm, "No PLL selected\n");
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2818 
2819 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2820 				const struct intel_shared_dpll *pll)
2821 {
2822 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2823 		return cnl_ddi_wrpll_get_freq(i915, pll);
2824 	else
2825 		return cnl_ddi_lcpll_get_freq(i915, pll);
2826 }
2827 
/* CNL PLLs run off the cdclk reference; there is no SSC reference. */
static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC reference */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
2833 
/* Debug dump of the CNL DPLL register values tracked in hw_state. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2842 
/* Hooks shared by the three CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
	.get_freq = cnl_ddi_pll_get_freq,
};
2849 
/* Pool of shareable CNL DPLLs; the list is terminated by an empty entry. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2856 
/* CNL DPLL manager: platform entry point for the shared-DPLL framework. */
static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = cnl_update_dpll_ref_clks,
	.dump_hw_state = cnl_dump_hw_state,
};
2864 
/* Table entry: pre-computed WRPLL parameters for one port clock (kHz). */
struct icl_combo_pll_params {
	int clock;			/* port clock this entry applies to */
	struct skl_wrpll_params wrpll;	/* raw register-field values */
};
2869 
2870 /*
2871  * These values alrea already adjusted: they're the bits we write to the
2872  * registers, not the logical values.
2873  */
/* Combo PHY DP PLL values for a 24 MHz reference, keyed by port clock (kHz). */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2900 
2901 
/* Also used for 38.4 MHz values. */
/* Same port-clock keys as the 24 MHz table; must keep the same entry count. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2929 
/* ICL TBT PLL parameters for a 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* ICL TBT PLL parameters for 19.2 MHz (also used for 38.4 MHz). */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2939 
/* TGL TBT PLL parameters, per reference clock; divider fields are unused. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused (implicitly zero-initialized) */
};

/*
 * Display WA #22010492432: tgl
 * Divide the nominal .dco_fraction value by 2.
 */
static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x1800,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2960 
/*
 * Look up pre-computed combo PHY DPLL parameters for a DP port clock.
 * Returns false (after logging MISSING_CASE) for unknown link rates.
 */
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				  struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	/* the 19.2 MHz table doubles for 38.4 MHz (see comment on the table) */
	const struct icl_combo_pll_params *params =
		dev_priv->dpll.ref_clks.nssc == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	/* both tables have the same number of entries, so this bound is safe */
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return true;
		}
	}

	MISSING_CASE(clock);
	return false;
}
2982 
/*
 * Select the fixed TBT PLL parameters for the current non-SSC reference
 * clock. Unknown reference clocks log MISSING_CASE and fall back to the
 * 19.2 MHz values. Always succeeds.
 */
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			     struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (INTEL_GEN(dev_priv) >= 12) {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		case 38400:
			*pll_params = tgl_tbt_pll_38_4MHz_values;
			break;
		}
	} else {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return true;
}
3020 
/* TBT PLL frequency readback is intentionally unsupported; see below. */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
3032 
3033 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3034 {
3035 	int ref_clock = i915->dpll.ref_clks.nssc;
3036 
3037 	/*
3038 	 * For ICL+, the spec states: if reference frequency is 38.4,
3039 	 * use 19.2 because the DPLL automatically divides that by 2.
3040 	 */
3041 	if (ref_clock == 38400)
3042 		ref_clock = 19200;
3043 
3044 	return ref_clock;
3045 }
3046 
3047 static bool
3048 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3049 	       struct skl_wrpll_params *wrpll_params)
3050 {
3051 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3052 
3053 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3054 					 icl_wrpll_ref_clock(i915));
3055 }
3056 
/*
 * Read back the output frequency of a combo PHY PLL, reusing the CNL
 * WRPLL readout with the ICL-adjusted reference clock.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll)
{
	int ref_clock = icl_wrpll_ref_clock(i915);

	return __cnl_ddi_wrpll_get_freq(i915, pll, ref_clock);
}
3063 
3064 static void icl_calc_dpll_state(struct drm_i915_private *i915,
3065 				const struct skl_wrpll_params *pll_params,
3066 				struct intel_dpll_hw_state *pll_state)
3067 {
3068 	memset(pll_state, 0, sizeof(*pll_state));
3069 
3070 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) |
3071 			    pll_params->dco_integer;
3072 
3073 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3074 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3075 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3076 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
3077 
3078 	if (INTEL_GEN(i915) >= 12)
3079 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3080 	else
3081 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
3082 }
3083 
/* Map an MG PLL id to its Type-C port (MGPLL1 corresponds to the first TC port). */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return id - DPLL_ID_ICL_MGPLL1;
}
3088 
/* Inverse of icl_pll_id_to_tc_port(): map a Type-C port to its MG PLL id. */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port + DPLL_ID_ICL_MGPLL1;
}
3093 
/*
 * Find an hsdiv (div1) / dsdiv (div2) divider pair that puts the DCO
 * frequency within the valid range for @clock_khz, and fill in the
 * MG_CLKTOP2_* fields of @state accordingly.
 *
 * On success *target_dco_khz is set to the chosen DCO frequency (kHz)
 * and true is returned; false means no divider combination fits.
 */
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				     u32 *target_dco_khz,
				     struct intel_dpll_hw_state *state,
				     bool is_dkl)
{
	u32 dco_min_freq, dco_max_freq;
	int div1_vals[] = {7, 5, 3, 2};
	unsigned int i;
	int div2;

	/* DP requires an exact 8.1 GHz DCO; HDMI allows a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	/* Try the largest div1 first, scanning div2 from 10 down to 1. */
	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Translate div1 into the HSDIV register encoding. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return true;
		}
	}

	return false;
}
3169 
3170 /*
3171  * The specification for this function uses real numbers, so the math had to be
3172  * adapted to integer-only calculation, that's why it looks so different.
3173  */
/*
 * Compute the MG (ICL) or Dekel (TGL+) PHY PLL hw state for the CRTC's
 * port clock. Fills @pll_state and returns true on success; returns
 * false if no suitable dividers or feedback ratio can be found.
 */
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				  struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = INTEL_GEN(dev_priv) >= 12;

	memset(pll_state, 0, sizeof(*pll_state));

	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				      pll_state, is_dkl)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to find divisors for clock %d\n", clock);
		return false;
	}

	/*
	 * Feedback divider: dco = refclk * m1div * (m2div_int + m2div_frac).
	 * m2div_int must fit in 8 bits; on ICL retry with m1div = 4.
	 */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255) {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to find mdiv for clock %d\n",
				    clock);
			return false;
		}
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional part of the feedback divider, in 2^-22 units. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* Reference-clock-dependent iref settings (fixed tables). */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return false;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* Loop filter coefficients depend on the DCO frequency band. */
	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * The masks limit which bias/coldst bits are written during
		 * PLL programming and compared during readout.
		 * NOTE(review): for a 38.4 MHz refclk nearly all of those
		 * bits are left at their HW defaults — presumably a
		 * hardware requirement; confirm against Bspec.
		 */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return true;
}
3378 
/*
 * Compute the port clock (kHz) from the MG (ICL) or Dekel (TGL+) PLL
 * hw state: clock = ref * m1 * (m2_int + m2_frac/2^22) / (5 * div1 * div2).
 * Returns 0 if the HSDIV field holds an unknown encoding.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* Feedback divider fields live in different registers per platform. */
	if (INTEL_GEN(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the HSDIV (div1) register encoding back to a ratio. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3450 
3451 /**
3452  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3453  * @crtc_state: state for the CRTC to select the DPLL for
3454  * @port_dpll_id: the active @port_dpll_id to select
3455  *
3456  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3457  * CRTC.
3458  */
3459 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3460 			      enum icl_port_dpll_id port_dpll_id)
3461 {
3462 	struct icl_port_dpll *port_dpll =
3463 		&crtc_state->icl_port_dplls[port_dpll_id];
3464 
3465 	crtc_state->shared_dpll = port_dpll->pll;
3466 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3467 }
3468 
3469 static void icl_update_active_dpll(struct intel_atomic_state *state,
3470 				   struct intel_crtc *crtc,
3471 				   struct intel_encoder *encoder)
3472 {
3473 	struct intel_crtc_state *crtc_state =
3474 		intel_atomic_get_new_crtc_state(state, crtc);
3475 	struct intel_digital_port *primary_port;
3476 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3477 
3478 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3479 		enc_to_mst(encoder)->primary :
3480 		enc_to_dig_port(encoder);
3481 
3482 	if (primary_port &&
3483 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3484 	     primary_port->tc_mode == TC_PORT_LEGACY))
3485 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3486 
3487 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3488 }
3489 
3490 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3491 {
3492 	if (!(i915->hti_state & HDPORT_ENABLED))
3493 		return 0;
3494 
3495 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3496 }
3497 
3498 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3499 				   struct intel_crtc *crtc,
3500 				   struct intel_encoder *encoder)
3501 {
3502 	struct intel_crtc_state *crtc_state =
3503 		intel_atomic_get_new_crtc_state(state, crtc);
3504 	struct skl_wrpll_params pll_params = { };
3505 	struct icl_port_dpll *port_dpll =
3506 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3507 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3508 	enum port port = encoder->port;
3509 	unsigned long dpll_mask;
3510 	int ret;
3511 
3512 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3513 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3514 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3515 	else
3516 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3517 
3518 	if (!ret) {
3519 		drm_dbg_kms(&dev_priv->drm,
3520 			    "Could not calculate combo PHY PLL state.\n");
3521 
3522 		return false;
3523 	}
3524 
3525 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3526 
3527 	if (IS_ROCKETLAKE(dev_priv)) {
3528 		dpll_mask =
3529 			BIT(DPLL_ID_EHL_DPLL4) |
3530 			BIT(DPLL_ID_ICL_DPLL1) |
3531 			BIT(DPLL_ID_ICL_DPLL0);
3532 	} else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) {
3533 		dpll_mask =
3534 			BIT(DPLL_ID_EHL_DPLL4) |
3535 			BIT(DPLL_ID_ICL_DPLL1) |
3536 			BIT(DPLL_ID_ICL_DPLL0);
3537 	} else {
3538 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3539 	}
3540 
3541 	/* Eliminate DPLLs from consideration if reserved by HTI */
3542 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3543 
3544 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3545 						&port_dpll->hw_state,
3546 						dpll_mask);
3547 	if (!port_dpll->pll) {
3548 		drm_dbg_kms(&dev_priv->drm,
3549 			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3550 			    encoder->base.base.id, encoder->base.name);
3551 		return false;
3552 	}
3553 
3554 	intel_reference_shared_dpll(state, crtc,
3555 				    port_dpll->pll, &port_dpll->hw_state);
3556 
3557 	icl_update_active_dpll(state, crtc, encoder);
3558 
3559 	return true;
3560 }
3561 
/*
 * Reserve both DPLLs needed by a Type-C port: the TBT PLL (default) and
 * the per-port MG PHY PLL. Both must be reserved up front because the
 * active one may change with the Type-C mode; the TBT reservation is
 * dropped again if the MG PHY PLL cannot be obtained.
 *
 * Returns true with both PLLs referenced on success, false otherwise.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First: the TBT PLL, which fills the default port DPLL slot. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Second: the MG PHY PLL dedicated to this Type-C port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Roll back the TBT PLL reservation taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3622 
3623 static bool icl_get_dplls(struct intel_atomic_state *state,
3624 			  struct intel_crtc *crtc,
3625 			  struct intel_encoder *encoder)
3626 {
3627 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3628 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3629 
3630 	if (intel_phy_is_combo(dev_priv, phy))
3631 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3632 	else if (intel_phy_is_tc(dev_priv, phy))
3633 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3634 
3635 	MISSING_CASE(phy);
3636 
3637 	return false;
3638 }
3639 
3640 static void icl_put_dplls(struct intel_atomic_state *state,
3641 			  struct intel_crtc *crtc)
3642 {
3643 	const struct intel_crtc_state *old_crtc_state =
3644 		intel_atomic_get_old_crtc_state(state, crtc);
3645 	struct intel_crtc_state *new_crtc_state =
3646 		intel_atomic_get_new_crtc_state(state, crtc);
3647 	enum icl_port_dpll_id id;
3648 
3649 	new_crtc_state->shared_dpll = NULL;
3650 
3651 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3652 		const struct icl_port_dpll *old_port_dpll =
3653 			&old_crtc_state->icl_port_dplls[id];
3654 		struct icl_port_dpll *new_port_dpll =
3655 			&new_crtc_state->icl_port_dplls[id];
3656 
3657 		new_port_dpll->pll = NULL;
3658 
3659 		if (!old_port_dpll->pll)
3660 			continue;
3661 
3662 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3663 	}
3664 }
3665 
/*
 * Read the current MG PHY PLL configuration from hardware into
 * @hw_state, masking each register down to the fields the driver
 * programs so readout matches the computed state.
 *
 * Returns true if the PLL is enabled and the state was read, false if
 * display power is off or the PLL is disabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Must mirror the mask selection in icl_calc_mg_pll_state(). */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3729 
/*
 * Read the current Dekel (TGL+) PHY PLL configuration from hardware
 * into @hw_state, masking each register down to the fields the driver
 * programs so readout matches the computed state.
 *
 * Returns true if the PLL is enabled and the state was read, false if
 * display power is off or the PLL is disabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3803 
/*
 * Common hw state readout for combo and TBT PLLs: check @enable_reg,
 * then read CFGCR0/CFGCR1 from the platform-specific register location
 * (RKL, TGL+, EHL DPLL4 or plain ICL).
 *
 * Returns true if the PLL is enabled and the state was read, false if
 * display power is off or the PLL is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (INTEL_GEN(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* EHL's DPLL4 reuses the ICL register layout at index 4. */
		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3852 
3853 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3854 				   struct intel_shared_dpll *pll,
3855 				   struct intel_dpll_hw_state *hw_state)
3856 {
3857 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3858 
3859 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3860 }
3861 
3862 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3863 				 struct intel_shared_dpll *pll,
3864 				 struct intel_dpll_hw_state *hw_state)
3865 {
3866 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3867 }
3868 
3869 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3870 			   struct intel_shared_dpll *pll)
3871 {
3872 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3873 	const enum intel_dpll_id id = pll->info->id;
3874 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3875 
3876 	if (IS_ROCKETLAKE(dev_priv)) {
3877 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3878 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3879 	} else if (INTEL_GEN(dev_priv) >= 12) {
3880 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3881 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3882 	} else {
3883 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3884 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3885 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3886 		} else {
3887 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3888 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3889 		}
3890 	}
3891 
3892 	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3893 	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3894 	intel_de_posting_read(dev_priv, cfgcr1_reg);
3895 }
3896 
/*
 * Program the MG PHY PLL registers from the PLL's hw state. Registers
 * with reserved fields are written read-modify-write; the DIV/LF/SSC
 * registers are fully owned by the driver and written directly.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC masks come from icl_calc_mg_pll_state() (refclk dependent). */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3947 
/*
 * Program the Dekel (DKL) PHY PLL registers for a Type-C port from the
 * software PLL state. This is the gen12+ counterpart of icl_mg_pll_write();
 * the hw_state fields reuse the MG PHY names but are masked with the
 * DKL-specific register layouts below.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush the final write before returning. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
4017 
4018 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4019 				 struct intel_shared_dpll *pll,
4020 				 i915_reg_t enable_reg)
4021 {
4022 	u32 val;
4023 
4024 	val = intel_de_read(dev_priv, enable_reg);
4025 	val |= PLL_POWER_ENABLE;
4026 	intel_de_write(dev_priv, enable_reg, val);
4027 
4028 	/*
4029 	 * The spec says we need to "wait" but it also says it should be
4030 	 * immediate.
4031 	 */
4032 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4033 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4034 			pll->info->id);
4035 }
4036 
4037 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4038 			   struct intel_shared_dpll *pll,
4039 			   i915_reg_t enable_reg)
4040 {
4041 	u32 val;
4042 
4043 	val = intel_de_read(dev_priv, enable_reg);
4044 	val |= PLL_ENABLE;
4045 	intel_de_write(dev_priv, enable_reg, val);
4046 
4047 	/* Timeout is actually 600us. */
4048 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4049 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4050 }
4051 
4052 static void combo_pll_enable(struct drm_i915_private *dev_priv,
4053 			     struct intel_shared_dpll *pll)
4054 {
4055 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4056 
4057 	if (IS_ELKHARTLAKE(dev_priv) &&
4058 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4059 
4060 		/*
4061 		 * We need to disable DC states when this DPLL is enabled.
4062 		 * This can be done by taking a reference on DPLL4 power
4063 		 * domain.
4064 		 */
4065 		pll->wakeref = intel_display_power_get(dev_priv,
4066 						       POWER_DOMAIN_DPLL_DC_OFF);
4067 	}
4068 
4069 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4070 
4071 	icl_dpll_write(dev_priv, pll);
4072 
4073 	/*
4074 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4075 	 * paths should already be setting the appropriate voltage, hence we do
4076 	 * nothing here.
4077 	 */
4078 
4079 	icl_pll_enable(dev_priv, pll, enable_reg);
4080 
4081 	/* DVFS post sequence would be here. See the comment above. */
4082 }
4083 
4084 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4085 			   struct intel_shared_dpll *pll)
4086 {
4087 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4088 
4089 	icl_dpll_write(dev_priv, pll);
4090 
4091 	/*
4092 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4093 	 * paths should already be setting the appropriate voltage, hence we do
4094 	 * nothing here.
4095 	 */
4096 
4097 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4098 
4099 	/* DVFS post sequence would be here. See the comment above. */
4100 }
4101 
4102 static void mg_pll_enable(struct drm_i915_private *dev_priv,
4103 			  struct intel_shared_dpll *pll)
4104 {
4105 	i915_reg_t enable_reg =
4106 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4107 
4108 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4109 
4110 	if (INTEL_GEN(dev_priv) >= 12)
4111 		dkl_pll_write(dev_priv, pll);
4112 	else
4113 		icl_mg_pll_write(dev_priv, pll);
4114 
4115 	/*
4116 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4117 	 * paths should already be setting the appropriate voltage, hence we do
4118 	 * nothing here.
4119 	 */
4120 
4121 	icl_pll_enable(dev_priv, pll, enable_reg);
4122 
4123 	/* DVFS post sequence would be here. See the comment above. */
4124 }
4125 
/*
 * Common ICL+ PLL disable sequence: drop PLL_ENABLE, wait for the lock bit
 * to clear, then drop PLL_POWER_ENABLE and wait for the power state bit to
 * clear.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4162 
4163 static void combo_pll_disable(struct drm_i915_private *dev_priv,
4164 			      struct intel_shared_dpll *pll)
4165 {
4166 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4167 
4168 	icl_pll_disable(dev_priv, pll, enable_reg);
4169 
4170 	if (IS_ELKHARTLAKE(dev_priv) &&
4171 	    pll->info->id == DPLL_ID_EHL_DPLL4)
4172 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4173 					pll->wakeref);
4174 }
4175 
/* Disable the Thunderbolt PLL. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
4181 
4182 static void mg_pll_disable(struct drm_i915_private *dev_priv,
4183 			   struct intel_shared_dpll *pll)
4184 {
4185 	i915_reg_t enable_reg =
4186 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4187 
4188 	icl_pll_disable(dev_priv, pll, enable_reg);
4189 }
4190 
/* ICL+: the DPLL reference is the non-SSC cdclk reference clock. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4196 
4197 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4198 			      const struct intel_dpll_hw_state *hw_state)
4199 {
4200 	drm_dbg_kms(&dev_priv->drm,
4201 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4202 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4203 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4204 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4205 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4206 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4207 		    hw_state->cfgcr0, hw_state->cfgcr1,
4208 		    hw_state->mg_refclkin_ctl,
4209 		    hw_state->mg_clktop2_coreclkctl1,
4210 		    hw_state->mg_clktop2_hsclkctl,
4211 		    hw_state->mg_pll_div0,
4212 		    hw_state->mg_pll_div1,
4213 		    hw_state->mg_pll_lf,
4214 		    hw_state->mg_pll_frac_lock,
4215 		    hw_state->mg_pll_ssc,
4216 		    hw_state->mg_pll_bias,
4217 		    hw_state->mg_pll_tdc_coldst_bias);
4218 }
4219 
/* DPLL hooks for the ICL+ combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* DPLL hooks for the ICL+ Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* DPLL hooks for the ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4240 
/* DPLLs available on Ice Lake: two combo, one TBT, four MG (Type-C). */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

/* Shared DPLL manager for Ice Lake. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4260 
/* DPLLs available on Elkhart Lake: combo PHY PLLs only. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* Shared DPLL manager for Elkhart Lake (no update_active_dpll hook). */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* DPLL hooks for the gen12+ Dekel PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4282 
/* DPLLs available on Tiger Lake: two combo, one TBT, six TC (Dekel). */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

/* Shared DPLL manager for Tiger Lake. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* DPLLs available on Rocket Lake: combo PHY PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* Shared DPLL manager for Rocket Lake (no update_active_dpll hook). */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4319 
4320 /**
4321  * intel_shared_dpll_init - Initialize shared DPLLs
4322  * @dev: drm device
4323  *
4324  * Initialize shared DPLLs for @dev.
4325  */
4326 void intel_shared_dpll_init(struct drm_device *dev)
4327 {
4328 	struct drm_i915_private *dev_priv = to_i915(dev);
4329 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4330 	const struct dpll_info *dpll_info;
4331 	int i;
4332 
4333 	if (IS_ROCKETLAKE(dev_priv))
4334 		dpll_mgr = &rkl_pll_mgr;
4335 	else if (INTEL_GEN(dev_priv) >= 12)
4336 		dpll_mgr = &tgl_pll_mgr;
4337 	else if (IS_ELKHARTLAKE(dev_priv))
4338 		dpll_mgr = &ehl_pll_mgr;
4339 	else if (INTEL_GEN(dev_priv) >= 11)
4340 		dpll_mgr = &icl_pll_mgr;
4341 	else if (IS_CANNONLAKE(dev_priv))
4342 		dpll_mgr = &cnl_pll_mgr;
4343 	else if (IS_GEN9_BC(dev_priv))
4344 		dpll_mgr = &skl_pll_mgr;
4345 	else if (IS_GEN9_LP(dev_priv))
4346 		dpll_mgr = &bxt_pll_mgr;
4347 	else if (HAS_DDI(dev_priv))
4348 		dpll_mgr = &hsw_pll_mgr;
4349 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4350 		dpll_mgr = &pch_pll_mgr;
4351 
4352 	if (!dpll_mgr) {
4353 		dev_priv->dpll.num_shared_dpll = 0;
4354 		return;
4355 	}
4356 
4357 	dpll_info = dpll_mgr->dpll_info;
4358 
4359 	for (i = 0; dpll_info[i].name; i++) {
4360 		drm_WARN_ON(dev, i != dpll_info[i].id);
4361 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4362 	}
4363 
4364 	dev_priv->dpll.mgr = dpll_mgr;
4365 	dev_priv->dpll.num_shared_dpll = i;
4366 	mutex_init(&dev_priv->dpll.lock);
4367 
4368 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4369 }
4370 
4371 /**
4372  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4373  * @state: atomic state
4374  * @crtc: CRTC to reserve DPLLs for
4375  * @encoder: encoder
4376  *
4377  * This function reserves all required DPLLs for the given CRTC and encoder
4378  * combination in the current atomic commit @state and the new @crtc atomic
4379  * state.
4380  *
4381  * The new configuration in the atomic commit @state is made effective by
4382  * calling intel_shared_dpll_swap_state().
4383  *
4384  * The reserved DPLLs should be released by calling
4385  * intel_release_shared_dplls().
4386  *
4387  * Returns:
4388  * True if all required DPLLs were successfully reserved.
4389  */
4390 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4391 				struct intel_crtc *crtc,
4392 				struct intel_encoder *encoder)
4393 {
4394 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4395 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4396 
4397 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4398 		return false;
4399 
4400 	return dpll_mgr->get_dplls(state, crtc, encoder);
4401 }
4402 
4403 /**
4404  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4405  * @state: atomic state
4406  * @crtc: crtc from which the DPLLs are to be released
4407  *
4408  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4409  * from the current atomic commit @state and the old @crtc atomic state.
4410  *
4411  * The new configuration in the atomic commit @state is made effective by
4412  * calling intel_shared_dpll_swap_state().
4413  */
4414 void intel_release_shared_dplls(struct intel_atomic_state *state,
4415 				struct intel_crtc *crtc)
4416 {
4417 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4418 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4419 
4420 	/*
4421 	 * FIXME: this function is called for every platform having a
4422 	 * compute_clock hook, even though the platform doesn't yet support
4423 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4424 	 * called on those.
4425 	 */
4426 	if (!dpll_mgr)
4427 		return;
4428 
4429 	dpll_mgr->put_dplls(state, crtc);
4430 }
4431 
4432 /**
4433  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4434  * @state: atomic state
4435  * @crtc: the CRTC for which to update the active DPLL
4436  * @encoder: encoder determining the type of port DPLL
4437  *
4438  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4439  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4440  * DPLL selected will be based on the current mode of the encoder's port.
4441  */
4442 void intel_update_active_dpll(struct intel_atomic_state *state,
4443 			      struct intel_crtc *crtc,
4444 			      struct intel_encoder *encoder)
4445 {
4446 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4447 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4448 
4449 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4450 		return;
4451 
4452 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4453 }
4454 
4455 /**
4456  * intel_dpll_get_freq - calculate the DPLL's output frequency
4457  * @i915: i915 device
4458  * @pll: DPLL for which to calculate the output frequency
4459  *
4460  * Return the output frequency corresponding to @pll's current state.
4461  */
4462 int intel_dpll_get_freq(struct drm_i915_private *i915,
4463 			const struct intel_shared_dpll *pll)
4464 {
4465 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4466 		return 0;
4467 
4468 	return pll->info->funcs->get_freq(i915, pll);
4469 }
4470 
4471 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4472 				  struct intel_shared_dpll *pll)
4473 {
4474 	struct intel_crtc *crtc;
4475 
4476 	pll->on = pll->info->funcs->get_hw_state(i915, pll,
4477 						 &pll->state.hw_state);
4478 
4479 	if (IS_ELKHARTLAKE(i915) && pll->on &&
4480 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4481 		pll->wakeref = intel_display_power_get(i915,
4482 						       POWER_DOMAIN_DPLL_DC_OFF);
4483 	}
4484 
4485 	pll->state.crtc_mask = 0;
4486 	for_each_intel_crtc(&i915->drm, crtc) {
4487 		struct intel_crtc_state *crtc_state =
4488 			to_intel_crtc_state(crtc->base.state);
4489 
4490 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4491 			pll->state.crtc_mask |= 1 << crtc->pipe;
4492 	}
4493 	pll->active_mask = pll->state.crtc_mask;
4494 
4495 	drm_dbg_kms(&i915->drm,
4496 		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
4497 		    pll->info->name, pll->state.crtc_mask, pll->on);
4498 }
4499 
4500 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4501 {
4502 	int i;
4503 
4504 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4505 		i915->dpll.mgr->update_ref_clks(i915);
4506 
4507 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4508 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4509 }
4510 
4511 static void sanitize_dpll_state(struct drm_i915_private *i915,
4512 				struct intel_shared_dpll *pll)
4513 {
4514 	if (!pll->on || pll->active_mask)
4515 		return;
4516 
4517 	drm_dbg_kms(&i915->drm,
4518 		    "%s enabled but not in use, disabling\n",
4519 		    pll->info->name);
4520 
4521 	pll->info->funcs->disable(i915, pll);
4522 	pll->on = false;
4523 }
4524 
4525 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4526 {
4527 	int i;
4528 
4529 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4530 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4531 }
4532 
4533 /**
4534  * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
4535  * @dev_priv: i915 drm device
4536  * @hw_state: hw state to be written to the log
4537  *
4538  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4539  */
4540 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4541 			      const struct intel_dpll_hw_state *hw_state)
4542 {
4543 	if (dev_priv->dpll.mgr) {
4544 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4545 	} else {
4546 		/* fallback for platforms that don't use the shared dpll
4547 		 * infrastructure
4548 		 */
4549 		drm_dbg_kms(&dev_priv->drm,
4550 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4551 			    "fp0: 0x%x, fp1: 0x%x\n",
4552 			    hw_state->dpll,
4553 			    hw_state->dpll_md,
4554 			    hw_state->fp0,
4555 			    hw_state->fp1);
4556 	}
4557 }
4558