1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_de.h"
25 #include "intel_display_types.h"
26 #include "intel_dpio_phy.h"
27 #include "intel_dpll.h"
28 #include "intel_dpll_mgr.h"
29 #include "intel_tc.h"
30 
31 /**
32  * DOC: Display PLLs
33  *
34  * Display PLLs used for driving outputs vary by platform. While some have
35  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
36  * from a pool. In the latter scenario, it is possible that multiple pipes
37  * share a PLL if their configurations match.
38  *
39  * This file provides an abstraction over display PLLs. The function
40  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
41  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
43  * for a given CRTC and encoder configuration by calling
44  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
45  * with intel_release_shared_dplls().
46  * Changes to the users are first staged in the atomic state, and then made
47  * effective by calling intel_shared_dpll_swap_state() during the atomic
48  * commit phase.
49  */
50 
/* Per-platform shared DPLL management hooks and PLL table. */
struct intel_dpll_mgr {
	/* Platform PLL table, terminated by an empty entry. */
	const struct dpll_info *dpll_info;

	/*
	 * Reserve the PLL(s) needed for the given CRTC/encoder in the
	 * atomic state; returns false if no suitable PLL is available.
	 */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* Release the PLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Optional: switch the active PLL for @crtc/@encoder. */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Optional: refresh the cached PLL reference clock frequencies. */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Log the contents of @hw_state for debugging. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
66 
67 static void
68 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
69 				  struct intel_shared_dpll_state *shared_dpll)
70 {
71 	enum intel_dpll_id i;
72 
73 	/* Copy shared dpll state */
74 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
75 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
76 
77 		shared_dpll[i] = pll->state;
78 	}
79 }
80 
81 static struct intel_shared_dpll_state *
82 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
83 {
84 	struct intel_atomic_state *state = to_intel_atomic_state(s);
85 
86 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
87 
88 	if (!state->dpll_set) {
89 		state->dpll_set = true;
90 
91 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
92 						  state->shared_dpll);
93 	}
94 
95 	return state->shared_dpll;
96 }
97 
98 /**
99  * intel_get_shared_dpll_by_id - get a DPLL given its id
100  * @dev_priv: i915 device instance
101  * @id: pll id
102  *
103  * Returns:
104  * A pointer to the DPLL with @id
105  */
106 struct intel_shared_dpll *
107 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
108 			    enum intel_dpll_id id)
109 {
110 	return &dev_priv->dpll.shared_dplls[id];
111 }
112 
113 /**
114  * intel_get_shared_dpll_id - get the id of a DPLL
115  * @dev_priv: i915 device instance
116  * @pll: the DPLL
117  *
118  * Returns:
119  * The id of @pll
120  */
121 enum intel_dpll_id
122 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
123 			 struct intel_shared_dpll *pll)
124 {
125 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
126 
127 	if (drm_WARN_ON(&dev_priv->drm,
128 			pll_idx < 0 ||
129 			pll_idx >= dev_priv->dpll.num_shared_dpll))
130 		return -1;
131 
132 	return pll_idx;
133 }
134 
/* For ILK+ */
/*
 * Warn if the PLL's actual hardware enable state does not match the
 * @state the caller expects.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Asserting on a NULL PLL is a caller bug; warn and bail. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
152 
153 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
154 {
155 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
156 }
157 
158 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
159 {
160 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
161 }
162 
163 static i915_reg_t
164 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
165 			   struct intel_shared_dpll *pll)
166 {
167 	if (IS_DG1(i915))
168 		return DG1_DPLL_ENABLE(pll->info->id);
169 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
170 		return MG_PLL_ENABLE(0);
171 
172 	return ICL_DPLL_ENABLE(pll->info->id);
173 }
174 
175 static i915_reg_t
176 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
177 			struct intel_shared_dpll *pll)
178 {
179 	const enum intel_dpll_id id = pll->info->id;
180 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
181 
182 	if (IS_ALDERLAKE_P(i915))
183 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
184 
185 	return MG_PLL_ENABLE(tc_port);
186 }
187 
188 /**
189  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
190  * @crtc_state: CRTC, and its state, which has a shared DPLL
191  *
192  * Enable the shared DPLL used by @crtc.
193  */
194 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
195 {
196 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
197 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
198 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
199 	unsigned int pipe_mask = BIT(crtc->pipe);
200 	unsigned int old_mask;
201 
202 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
203 		return;
204 
205 	mutex_lock(&dev_priv->dpll.lock);
206 	old_mask = pll->active_mask;
207 
208 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
209 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
210 		goto out;
211 
212 	pll->active_mask |= pipe_mask;
213 
214 	drm_dbg_kms(&dev_priv->drm,
215 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
216 		    pll->info->name, pll->active_mask, pll->on,
217 		    crtc->base.base.id, crtc->base.name);
218 
219 	if (old_mask) {
220 		drm_WARN_ON(&dev_priv->drm, !pll->on);
221 		assert_shared_dpll_enabled(dev_priv, pll);
222 		goto out;
223 	}
224 	drm_WARN_ON(&dev_priv->drm, pll->on);
225 
226 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
227 	pll->info->funcs->enable(dev_priv, pll);
228 	pll->on = true;
229 
230 out:
231 	mutex_unlock(&dev_priv->dpll.lock);
232 }
233 
234 /**
235  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
236  * @crtc_state: CRTC, and its state, which has a shared DPLL
237  *
238  * Disable the shared DPLL used by @crtc.
239  */
240 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
241 {
242 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
243 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
244 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
245 	unsigned int pipe_mask = BIT(crtc->pipe);
246 
247 	/* PCH only available on ILK+ */
248 	if (DISPLAY_VER(dev_priv) < 5)
249 		return;
250 
251 	if (pll == NULL)
252 		return;
253 
254 	mutex_lock(&dev_priv->dpll.lock);
255 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
256 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
257 		     crtc->base.base.id, crtc->base.name))
258 		goto out;
259 
260 	drm_dbg_kms(&dev_priv->drm,
261 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
262 		    pll->info->name, pll->active_mask, pll->on,
263 		    crtc->base.base.id, crtc->base.name);
264 
265 	assert_shared_dpll_enabled(dev_priv, pll);
266 	drm_WARN_ON(&dev_priv->drm, !pll->on);
267 
268 	pll->active_mask &= ~pipe_mask;
269 	if (pll->active_mask)
270 		goto out;
271 
272 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
273 	pll->info->funcs->disable(dev_priv, pll);
274 	pll->on = false;
275 
276 out:
277 	mutex_unlock(&dev_priv->dpll.lock);
278 }
279 
/*
 * Find a PLL within @dpll_mask for @crtc: prefer an already-used PLL
 * whose hw state matches @pll_state exactly (sharing), otherwise fall
 * back to the first unused PLL.  Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Share an in-use PLL only on an exact hw state match. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
328 
329 static void
330 intel_reference_shared_dpll(struct intel_atomic_state *state,
331 			    const struct intel_crtc *crtc,
332 			    const struct intel_shared_dpll *pll,
333 			    const struct intel_dpll_hw_state *pll_state)
334 {
335 	struct drm_i915_private *i915 = to_i915(state->base.dev);
336 	struct intel_shared_dpll_state *shared_dpll;
337 	const enum intel_dpll_id id = pll->info->id;
338 
339 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
340 
341 	if (shared_dpll[id].pipe_mask == 0)
342 		shared_dpll[id].hw_state = *pll_state;
343 
344 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
345 		pipe_name(crtc->pipe));
346 
347 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
348 }
349 
350 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
351 					  const struct intel_crtc *crtc,
352 					  const struct intel_shared_dpll *pll)
353 {
354 	struct intel_shared_dpll_state *shared_dpll;
355 
356 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
357 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
358 }
359 
360 static void intel_put_dpll(struct intel_atomic_state *state,
361 			   struct intel_crtc *crtc)
362 {
363 	const struct intel_crtc_state *old_crtc_state =
364 		intel_atomic_get_old_crtc_state(state, crtc);
365 	struct intel_crtc_state *new_crtc_state =
366 		intel_atomic_get_new_crtc_state(state, crtc);
367 
368 	new_crtc_state->shared_dpll = NULL;
369 
370 	if (!old_crtc_state->shared_dpll)
371 		return;
372 
373 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
374 }
375 
376 /**
377  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
378  * @state: atomic state
379  *
380  * This is the dpll version of drm_atomic_helper_swap_state() since the
381  * helper does not handle driver-specific global state.
382  *
383  * For consistency with atomic helpers this function does a complete swap,
384  * i.e. it also puts the current state into @state, even though there is no
385  * need for that at this moment.
386  */
387 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
388 {
389 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
390 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
391 	enum intel_dpll_id i;
392 
393 	if (!state->dpll_set)
394 		return;
395 
396 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
397 		struct intel_shared_dpll *pll =
398 			&dev_priv->dpll.shared_dplls[i];
399 
400 		swap(pll->state, shared_dpll[i]);
401 	}
402 }
403 
/*
 * Read the PCH DPLL/FP registers into @hw_state.  Returns true if the
 * PLL is enabled in hardware, false if it is off or display power
 * could not be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
426 
/* Warn if no PCH reference clock source is enabled in PCH_DREF_CONTROL. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* Only IBX/CPT PCHs have this register. */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	/* Any of the three source selects counts as "refclk enabled". */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
439 
/* Program and enable a PCH DPLL from the cached hw state. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
466 
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* NOTE(review): 200 us settle delay matches the enable path. */
	udelay(200);
}
476 
477 static bool ibx_get_dpll(struct intel_atomic_state *state,
478 			 struct intel_crtc *crtc,
479 			 struct intel_encoder *encoder)
480 {
481 	struct intel_crtc_state *crtc_state =
482 		intel_atomic_get_new_crtc_state(state, crtc);
483 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
484 	struct intel_shared_dpll *pll;
485 	enum intel_dpll_id i;
486 
487 	if (HAS_PCH_IBX(dev_priv)) {
488 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
489 		i = (enum intel_dpll_id) crtc->pipe;
490 		pll = &dev_priv->dpll.shared_dplls[i];
491 
492 		drm_dbg_kms(&dev_priv->drm,
493 			    "[CRTC:%d:%s] using pre-allocated %s\n",
494 			    crtc->base.base.id, crtc->base.name,
495 			    pll->info->name);
496 	} else {
497 		pll = intel_find_shared_dpll(state, crtc,
498 					     &crtc_state->dpll_hw_state,
499 					     BIT(DPLL_ID_PCH_PLL_B) |
500 					     BIT(DPLL_ID_PCH_PLL_A));
501 	}
502 
503 	if (!pll)
504 		return false;
505 
506 	/* reference the pll */
507 	intel_reference_shared_dpll(state, crtc,
508 				    pll, &crtc_state->dpll_hw_state);
509 
510 	crtc_state->shared_dpll = pll;
511 
512 	return true;
513 }
514 
515 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
516 			      const struct intel_dpll_hw_state *hw_state)
517 {
518 	drm_dbg_kms(&dev_priv->drm,
519 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
520 		    "fp0: 0x%x, fp1: 0x%x\n",
521 		    hw_state->dpll,
522 		    hw_state->dpll_md,
523 		    hw_state->fp0,
524 		    hw_state->fp1);
525 }
526 
/* Hardware enable/disable/readout hooks for the IBX PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
532 
/* PCH DPLL table: { name, hw hooks, id, flags }; {} terminates the list. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
538 
/* DPLL manager for PCH platforms; optional hooks are intentionally unset. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
545 
/* Program WRPLL_CTL from the cached state and wait briefly for the PLL. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
555 
/* Program SPLL_CTL from the cached state and wait briefly for the PLL. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
563 
/* Disable a WRPLL by clearing only its enable bit. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
581 
/* Disable the SPLL by clearing only its enable bit. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
599 
/*
 * Read WRPLL_CTL into @hw_state.  Returns true if the PLL is enabled,
 * false if it is off or display power could not be acquired.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
620 
/*
 * Read SPLL_CTL into @hw_state.  Returns true if the PLL is enabled,
 * false if it is off or display power could not be acquired.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
640 
/* LC PLL frequency (MHz); LC_FREQ_2K is the doubled value in kHz units. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range and step. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate (P, N2 = 2*N, R2 = 2*R) divider triple for the HSW WRPLL. */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
657 
/*
 * Return the PPM error budget for a given pixel clock (in Hz).
 * Clocks not in the table get the default 1000 ppm budget.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{  25175000,    0 }, {  25200000,    0 }, {  27000000,    0 },
		{  27027000,    0 }, {  37762500,    0 }, {  37800000,    0 },
		{  40500000,    0 }, {  40541000,    0 }, {  54000000,    0 },
		{  54054000,    0 }, {  59341000,    0 }, {  59400000,    0 },
		{  72000000,    0 }, {  74176000,    0 }, {  74250000,    0 },
		{  81000000,    0 }, {  81081000,    0 }, {  89012000,    0 },
		{  89100000,    0 }, { 108000000,    0 }, { 108108000,    0 },
		{ 111264000,    0 }, { 111375000,    0 }, { 148352000,    0 },
		{ 148500000,    0 }, { 162000000,    0 }, { 162162000,    0 },
		{ 222525000,    0 }, { 222750000,    0 }, { 296703000,    0 },
		{ 297000000,    0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
731 
/*
 * Compare the candidate divider triple (r2, n2, p) against the current
 * *best and keep whichever tracks freq2k better, per the budget rules
 * explained in the comment block below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/* All comparisons are cross-multiplied to avoid division. */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
791 
/*
 * Exhaustively search for WRPLL dividers (r2 = 2*R, n2 = 2*N, p) for
 * @clock, honoring the REF/VCO constraints and the per-clock PPM budget.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Try every legal post divider for this (r2, n2). */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
856 
857 static struct intel_shared_dpll *
858 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
859 		       struct intel_crtc *crtc)
860 {
861 	struct intel_crtc_state *crtc_state =
862 		intel_atomic_get_new_crtc_state(state, crtc);
863 	struct intel_shared_dpll *pll;
864 	u32 val;
865 	unsigned int p, n2, r2;
866 
867 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
868 
869 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
870 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
871 	      WRPLL_DIVIDER_POST(p);
872 
873 	crtc_state->dpll_hw_state.wrpll = val;
874 
875 	pll = intel_find_shared_dpll(state, crtc,
876 				     &crtc_state->dpll_hw_state,
877 				     BIT(DPLL_ID_WRPLL2) |
878 				     BIT(DPLL_ID_WRPLL1));
879 
880 	if (!pll)
881 		return NULL;
882 
883 	return pll;
884 }
885 
/* Compute the WRPLL output frequency (kHz) from the cached control value. */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	/* Pick the reference clock (kHz) based on the WRPLL mux selection. */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		/* 2.7 GHz LC PLL, in kHz. */
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
925 
926 static struct intel_shared_dpll *
927 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
928 {
929 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
930 	struct intel_shared_dpll *pll;
931 	enum intel_dpll_id pll_id;
932 	int clock = crtc_state->port_clock;
933 
934 	switch (clock / 2) {
935 	case 81000:
936 		pll_id = DPLL_ID_LCPLL_810;
937 		break;
938 	case 135000:
939 		pll_id = DPLL_ID_LCPLL_1350;
940 		break;
941 	case 270000:
942 		pll_id = DPLL_ID_LCPLL_2700;
943 		break;
944 	default:
945 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
946 			    clock);
947 		return NULL;
948 	}
949 
950 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
951 
952 	if (!pll)
953 		return NULL;
954 
955 	return pll;
956 }
957 
958 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
959 				  const struct intel_shared_dpll *pll,
960 				  const struct intel_dpll_hw_state *pll_state)
961 {
962 	int link_clock = 0;
963 
964 	switch (pll->info->id) {
965 	case DPLL_ID_LCPLL_810:
966 		link_clock = 81000;
967 		break;
968 	case DPLL_ID_LCPLL_1350:
969 		link_clock = 135000;
970 		break;
971 	case DPLL_ID_LCPLL_2700:
972 		link_clock = 270000;
973 		break;
974 	default:
975 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
976 		break;
977 	}
978 
979 	return link_clock * 2;
980 }
981 
982 static struct intel_shared_dpll *
983 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
984 		      struct intel_crtc *crtc)
985 {
986 	struct intel_crtc_state *crtc_state =
987 		intel_atomic_get_new_crtc_state(state, crtc);
988 
989 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
990 		return NULL;
991 
992 	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
993 					 SPLL_REF_MUXED_SSC;
994 
995 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
996 				      BIT(DPLL_ID_SPLL));
997 }
998 
999 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1000 				 const struct intel_shared_dpll *pll,
1001 				 const struct intel_dpll_hw_state *pll_state)
1002 {
1003 	int link_clock = 0;
1004 
1005 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1006 	case SPLL_FREQ_810MHz:
1007 		link_clock = 81000;
1008 		break;
1009 	case SPLL_FREQ_1350MHz:
1010 		link_clock = 135000;
1011 		break;
1012 	case SPLL_FREQ_2700MHz:
1013 		link_clock = 270000;
1014 		break;
1015 	default:
1016 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1017 		break;
1018 	}
1019 
1020 	return link_clock * 2;
1021 }
1022 
1023 static bool hsw_get_dpll(struct intel_atomic_state *state,
1024 			 struct intel_crtc *crtc,
1025 			 struct intel_encoder *encoder)
1026 {
1027 	struct intel_crtc_state *crtc_state =
1028 		intel_atomic_get_new_crtc_state(state, crtc);
1029 	struct intel_shared_dpll *pll;
1030 
1031 	memset(&crtc_state->dpll_hw_state, 0,
1032 	       sizeof(crtc_state->dpll_hw_state));
1033 
1034 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1035 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1036 	else if (intel_crtc_has_dp_encoder(crtc_state))
1037 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1038 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1039 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1040 	else
1041 		return false;
1042 
1043 	if (!pll)
1044 		return false;
1045 
1046 	intel_reference_shared_dpll(state, crtc,
1047 				    pll, &crtc_state->dpll_hw_state);
1048 
1049 	crtc_state->shared_dpll = pll;
1050 
1051 	return true;
1052 }
1053 
1054 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1055 {
1056 	i915->dpll.ref_clks.ssc = 135000;
1057 	/* Non-SSC is only used on non-ULT HSW. */
1058 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1059 		i915->dpll.ref_clks.nssc = 24000;
1060 	else
1061 		i915->dpll.ref_clks.nssc = 135000;
1062 }
1063 
/* Log the cached HSW PLL hw state (WRPLL + SPLL control values) for debug. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1070 
/* Ops for the HSW WRPLLs (used for HDMI outputs, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1077 
/* Ops for the HSW SPLL (used for analog outputs, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1084 
/* The HSW LCPLLs are treated as always-on, so enabling one is a no-op. */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1089 
/* The HSW LCPLLs are treated as always-on, so disabling one is a no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1094 
/*
 * The HSW LCPLLs have no per-PLL state to read back; report them as
 * enabled without touching @hw_state.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1101 
/* Ops for the fixed HSW LCPLLs (used for DP outputs, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1108 
/* The HSW PLL pool: two WRPLLs, the SPLL and three always-on LCPLLs. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1118 
/* Shared-DPLL manager vtable for HSW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1126 
/* Per-PLL control and configuration registers on SKL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1130 
/*
 * Register set for each SKL DPLL;
 * this array is indexed by the *shared* pll id
 */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1157 
/*
 * Program this PLL's configuration field (HDMI mode, SSC, link rate)
 * in the DPLL_CTRL1 register shared by all SKL DPLLs.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* Clear only this PLL's bits; the other PLLs share the register. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	/* hw_state.ctrl1 is kept unshifted; each PLL owns a 6-bit field. */
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1174 
/*
 * Enable a SKL shared DPLL (1-3): program its DPLL_CTRL1 field and
 * cfgcr1/2 configuration, set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* Wait for the PLL to report lock; log (don't fail) on timeout. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1195 
/*
 * DPLL0 is always running (it drives CDCLK); only its DPLL_CTRL1
 * field can be (re)programmed here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1201 
/* Disable a SKL shared DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1213 
/* DPLL0 stays on (it drives CDCLK), so disabling it is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1218 
/*
 * Read back the current hw state of a SKL shared DPLL (1-3) into
 * @hw_state. Returns false if the display power domain or the PLL
 * itself is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Each PLL owns a 6-bit field in the shared DPLL_CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1255 
/*
 * Read back DPLL0's DPLL_CTRL1 field into @hw_state. Returns false if
 * the display power domain is off or the PLL is unexpectedly disabled.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* Each PLL owns a 6-bit field in the shared DPLL_CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1288 
/* Best WRPLL divider candidate found so far by the divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1295 
1296 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1297 {
1298 	memset(ctx, 0, sizeof(*ctx));
1299 
1300 	ctx->min_deviation = U64_MAX;
1301 }
1302 
/*
 * DCO freq must be within +1%/-6% of the DCO central freq
 * (the limits below are expressed in units of 0.01%).
 */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1306 
1307 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1308 				  u64 central_freq,
1309 				  u64 dco_freq,
1310 				  unsigned int divider)
1311 {
1312 	u64 deviation;
1313 
1314 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1315 			      central_freq);
1316 
1317 	/* positive deviation */
1318 	if (dco_freq >= central_freq) {
1319 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1320 		    deviation < ctx->min_deviation) {
1321 			ctx->min_deviation = deviation;
1322 			ctx->central_freq = central_freq;
1323 			ctx->dco_freq = dco_freq;
1324 			ctx->p = divider;
1325 		}
1326 	/* negative deviation */
1327 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1328 		   deviation < ctx->min_deviation) {
1329 		ctx->min_deviation = deviation;
1330 		ctx->central_freq = central_freq;
1331 		ctx->dco_freq = dco_freq;
1332 		ctx->p = divider;
1333 	}
1334 }
1335 
/*
 * Split the total divider @p into the hardware's p0 (P), p1 (Q) and
 * p2 (K) multipliers such that p = p0 * p1 * p2. Unsupported values
 * of @p leave the outputs untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	unsigned int half;

	if (p % 2 != 0) {
		/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
		switch (p) {
		case 3:
		case 9:
			*p0 = 3;
			*p1 = 1;
			*p2 = p / 3;
			break;
		case 5:
		case 7:
			*p0 = p;
			*p1 = 1;
			*p2 = 1;
			break;
		case 15:
			*p0 = 3;
			*p1 = 1;
			*p2 = 5;
			break;
		case 21:
			*p0 = 7;
			*p1 = 1;
			*p2 = 3;
			break;
		case 35:
			*p0 = 7;
			*p1 = 1;
			*p2 = 5;
			break;
		}
		return;
	}

	/* even dividers: factor out one 2, then split the remaining half */
	half = p / 2;

	if (half == 1 || half == 2 || half == 3 || half == 5) {
		*p0 = 2;
		*p1 = 1;
		*p2 = half;
	} else if (half % 2 == 0) {
		*p0 = 2;
		*p1 = half / 2;
		*p2 = 2;
	} else if (half % 3 == 0) {
		*p0 = 3;
		*p1 = half / 3;
		*p2 = 2;
	} else if (half % 7 == 0) {
		*p0 = 7;
		*p1 = half / 7;
		*p2 = 2;
	}
}
1384 
/* Register-level WRPLL configuration computed from the chosen dividers. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* fractional part of the DCO multiplier */
	u32 dco_integer;	/* integer part of the DCO multiplier */
	u32 qdiv_ratio;		/* Q divider ratio (p1) */
	u32 qdiv_mode;		/* 1 when the Q divider is in use (ratio != 1) */
	u32 kdiv;		/* encoded K divider (p2) */
	u32 pdiv;		/* encoded P divider (p0) */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1394 
/*
 * Encode the chosen WRPLL dividers into their register-level fields.
 *
 * @params: output encodings (see struct skl_wrpll_params)
 * @afe_clock: AFE clock (5x port clock) in Hz
 * @ref_clock: reference clock in kHz
 * @central_freq: chosen DCO central frequency in Hz
 * @p0, @p1, @p2: chosen dividers (total divider p = p0 * p1 * p2)
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* central_freq comes from the fixed dco_central_freq[] table */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	/* DCO frequency = total divider * AFE clock */
	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match BSpec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1462 
/*
 * Find WRPLL dividers for @clock: try every divider / DCO central
 * frequency combination, keeping the one with minimal DCO deviation
 * and preferring even dividers. Fills @wrpll_params on success;
 * returns false when no combination keeps the DCO within tolerance.
 *
 * @clock: port clock in Hz
 * @ref_clock: reference clock in kHz
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	/* d == 0 tries the even dividers, d == 1 the odd ones */
	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1536 
/*
 * Compute the HDMI WRPLL configuration for @crtc_state and store it in
 * dpll_hw_state (ctrl1, cfgcr1, cfgcr2). Returns false if no suitable
 * dividers exist for the requested port clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, skl_ddi_calculate_wrpll() wants Hz */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1574 
/*
 * Compute the port clock in kHz from a WRPLL (HDMI mode) hw state by
 * decoding the P/Q/K dividers and the DCO frequency from cfgcr1/2.
 * Returns 0 on an invalid divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* The Q divider only applies when Q mode is enabled. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* Reconstruct the DCO frequency: integer plus 1/0x8000 fractional part. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* The AFE clock is 5x the port clock. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1645 
/*
 * Encode the DP link rate for @crtc_state into dpll_hw_state.ctrl1
 * (stored unshifted, see below). Always returns true.
 *
 * NOTE(review): a port_clock not listed below leaves the link-rate
 * field at 0 — presumably callers only pass the standard rates;
 * confirm against the DP rate tables.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1685 
1686 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1687 				  const struct intel_shared_dpll *pll,
1688 				  const struct intel_dpll_hw_state *pll_state)
1689 {
1690 	int link_clock = 0;
1691 
1692 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1693 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1694 	case DPLL_CTRL1_LINK_RATE_810:
1695 		link_clock = 81000;
1696 		break;
1697 	case DPLL_CTRL1_LINK_RATE_1080:
1698 		link_clock = 108000;
1699 		break;
1700 	case DPLL_CTRL1_LINK_RATE_1350:
1701 		link_clock = 135000;
1702 		break;
1703 	case DPLL_CTRL1_LINK_RATE_1620:
1704 		link_clock = 162000;
1705 		break;
1706 	case DPLL_CTRL1_LINK_RATE_2160:
1707 		link_clock = 216000;
1708 		break;
1709 	case DPLL_CTRL1_LINK_RATE_2700:
1710 		link_clock = 270000;
1711 		break;
1712 	default:
1713 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1714 		break;
1715 	}
1716 
1717 	return link_clock * 2;
1718 }
1719 
1720 static bool skl_get_dpll(struct intel_atomic_state *state,
1721 			 struct intel_crtc *crtc,
1722 			 struct intel_encoder *encoder)
1723 {
1724 	struct intel_crtc_state *crtc_state =
1725 		intel_atomic_get_new_crtc_state(state, crtc);
1726 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1727 	struct intel_shared_dpll *pll;
1728 	bool bret;
1729 
1730 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1731 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1732 		if (!bret) {
1733 			drm_dbg_kms(&i915->drm,
1734 				    "Could not get HDMI pll dividers.\n");
1735 			return false;
1736 		}
1737 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1738 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1739 		if (!bret) {
1740 			drm_dbg_kms(&i915->drm,
1741 				    "Could not set DP dpll HW state.\n");
1742 			return false;
1743 		}
1744 	} else {
1745 		return false;
1746 	}
1747 
1748 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1749 		pll = intel_find_shared_dpll(state, crtc,
1750 					     &crtc_state->dpll_hw_state,
1751 					     BIT(DPLL_ID_SKL_DPLL0));
1752 	else
1753 		pll = intel_find_shared_dpll(state, crtc,
1754 					     &crtc_state->dpll_hw_state,
1755 					     BIT(DPLL_ID_SKL_DPLL3) |
1756 					     BIT(DPLL_ID_SKL_DPLL2) |
1757 					     BIT(DPLL_ID_SKL_DPLL1));
1758 	if (!pll)
1759 		return false;
1760 
1761 	intel_reference_shared_dpll(state, crtc,
1762 				    pll, &crtc_state->dpll_hw_state);
1763 
1764 	crtc_state->shared_dpll = pll;
1765 
1766 	return true;
1767 }
1768 
1769 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1770 				const struct intel_shared_dpll *pll,
1771 				const struct intel_dpll_hw_state *pll_state)
1772 {
1773 	/*
1774 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1775 	 * the internal shift for each field
1776 	 */
1777 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1778 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1779 	else
1780 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1781 }
1782 
/* The SKL DPLLs use the CDCLK reference; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1788 
/* Log the cached SKL PLL hw state (ctrl1 + cfgcr1/2) for debug. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1798 
/* Ops for the shared SKL DPLLs 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1805 
/* Ops for DPLL0, which drives CDCLK: only its ctrl1 field is programmable. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1812 
/* The SKL DPLL pool: always-on DPLL0 plus shareable DPLL1-3. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1820 
/* Shared-DPLL manager vtable for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1828 
/*
 * Enable and lock a BXT/GLK port PLL: select the reference, (GLK only)
 * power the PLL up, program the dividers and loop filter from the
 * cached hw state, trigger recalibration, then enable the PLL and wait
 * for lock. Finally program the lane stagger configuration.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		/* 200 us timeout for the power state to stick */
		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	/* 200 us timeout for the PLL to lock */
	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1949 
/*
 * Disable a BXT/GLK port PLL; on GLK also power the PLL down and wait
 * for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		/* 200 us timeout for the power state to clear */
		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1972 
/*
 * Read back the hw state of a BXT/GLK port PLL, masking each register
 * down to the bits that are actually programmed. Returns false if the
 * display power domain or the PLL itself is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2052 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* link clock (kHz) this entry applies to */
	u32 p1;			/* post divider 1 */
	u32 p2;			/* post divider 2 */
	u32 m2_int;		/* integer part of the m2 feedback divider */
	u32 m2_frac;		/* 22-bit fractional part of m2 */
	bool m2_frac_en;	/* use the fractional part of m2 */
	u32 n;			/* reference divider */

	int vco;		/* resulting VCO frequency (kHz) */
};
2065 
/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	/*
	 * {clock, p1, p2, m2_int, m2_frac, m2_frac_en, n};
	 * .vco is left 0 here and filled in at lookup time by
	 * bxt_ddi_dp_pll_dividers().
	 */
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2076 
/*
 * Compute the BXT port PLL dividers for an HDMI port clock.
 *
 * Returns false if no valid divider combination exists for the
 * requested port clock.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
			crtc_state->port_clock,
			pipe_name(crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	/* m1 is expected to be fixed to 2 on this platform */
	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* split m2 into its integer part and a 22-bit fractional part */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}
2109 
2110 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2111 				    struct bxt_clk_div *clk_div)
2112 {
2113 	int clock = crtc_state->port_clock;
2114 	int i;
2115 
2116 	*clk_div = bxt_dp_clk_val[0];
2117 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2118 		if (bxt_dp_clk_val[i].clock == clock) {
2119 			*clk_div = bxt_dp_clk_val[i];
2120 			break;
2121 		}
2122 	}
2123 
2124 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2125 }
2126 
/*
 * Translate the pre-computed dividers in @clk_div into BXT port PLL
 * register values stored in crtc_state->dpll_hw_state.
 *
 * Returns false if the requested VCO frequency falls outside the
 * supported frequency bands.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/*
	 * Loop filter coefficients depend on the VCO frequency band.
	 * Note that the second branch deliberately excludes exactly
	 * 5400000, which gets its own coefficient set below.
	 */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger delay scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2196 
2197 static bool
2198 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2199 {
2200 	struct bxt_clk_div clk_div = {};
2201 
2202 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2203 
2204 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2205 }
2206 
2207 static bool
2208 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2209 {
2210 	struct bxt_clk_div clk_div = {};
2211 
2212 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2213 
2214 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2215 }
2216 
/*
 * Calculate the port clock generated by a BXT port PLL from the saved
 * register state. m2 is reassembled as a fixed point value with a
 * 22-bit fractional part, mirroring the split done when programming
 * the dividers.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	clock.m1 = 2;	/* m1 is fixed to 2 on this platform */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2233 
/*
 * Compute the PLL state for the CRTC and reserve its PLL. On BXT there
 * is a fixed 1:1 mapping between ports and PLLs, so no PLL sharing or
 * selection logic is needed.
 *
 * Returns false if no valid PLL state could be computed for the output.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2266 
/* BXT reference clocks: 100 MHz for both SSC and non-SSC (values in kHz). */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2273 
2274 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2275 			      const struct intel_dpll_hw_state *hw_state)
2276 {
2277 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2278 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2279 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2280 		    hw_state->ebb0,
2281 		    hw_state->ebb4,
2282 		    hw_state->pll0,
2283 		    hw_state->pll1,
2284 		    hw_state->pll2,
2285 		    hw_state->pll3,
2286 		    hw_state->pll6,
2287 		    hw_state->pll8,
2288 		    hw_state->pll9,
2289 		    hw_state->pll10,
2290 		    hw_state->pcsdw12);
2291 }
2292 
/* PLL enable/disable/readout hooks shared by all three BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2299 
/* BXT port PLLs; mapped 1:1 to ports in bxt_get_dpll(). */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2306 
/* Top-level PLL manager hooks for the BXT platform. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2314 
/*
 * Decompose the overall WRPLL divider into the logical P, Q and K
 * dividers. Only values produced by icl_calc_wrpll()'s candidate
 * divider list are expected here; other inputs may leave the outputs
 * untouched.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			/* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2353 
2354 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2355 				      u32 dco_freq, u32 ref_freq,
2356 				      int pdiv, int qdiv, int kdiv)
2357 {
2358 	u32 dco;
2359 
2360 	switch (kdiv) {
2361 	case 1:
2362 		params->kdiv = 1;
2363 		break;
2364 	case 2:
2365 		params->kdiv = 2;
2366 		break;
2367 	case 3:
2368 		params->kdiv = 4;
2369 		break;
2370 	default:
2371 		WARN(1, "Incorrect KDiv\n");
2372 	}
2373 
2374 	switch (pdiv) {
2375 	case 2:
2376 		params->pdiv = 1;
2377 		break;
2378 	case 3:
2379 		params->pdiv = 2;
2380 		break;
2381 	case 5:
2382 		params->pdiv = 4;
2383 		break;
2384 	case 7:
2385 		params->pdiv = 8;
2386 		break;
2387 	default:
2388 		WARN(1, "Incorrect PDiv\n");
2389 	}
2390 
2391 	WARN_ON(kdiv != 2 && qdiv != 1);
2392 
2393 	params->qdiv_ratio = qdiv;
2394 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2395 
2396 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2397 
2398 	params->dco_integer = dco >> 15;
2399 	params->dco_fraction = dco & 0x7fff;
2400 }
2401 
2402 /*
2403  * Display WA #22010492432: ehl, tgl, adl-p
2404  * Program half of the nominal DCO divider fraction value.
2405  */
2406 static bool
2407 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2408 {
2409 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2410 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2411 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
2412 		 i915->dpll.ref_clks.nssc == 38400;
2413 }
2414 
/* Pre-computed combo PHY PLL parameters for one DP link rate. */
struct icl_combo_pll_params {
	int clock;	/* link rate in kHz */
	struct skl_wrpll_params wrpll;
};
2419 
2420 /*
2421  * These values alrea already adjusted: they're the bits we write to the
2422  * registers, not the logical values.
2423  */
/* Combo PHY PLL parameters per DP link rate, 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2450 
2451 
/*
 * Combo PHY PLL parameters per DP link rate, 19.2 MHz reference clock.
 * Also used for 38.4 MHz values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2479 
/* TBT PLL parameters, ICL, 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters, ICL, 19.2 MHz (also used for 38.4 MHz) reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters, TGL+, 19.2 MHz (also used for 38.4 MHz) reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters, TGL+, 24 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2500 
/*
 * Look up the pre-computed combo PHY PLL parameters for the DP link
 * rate in crtc_state->port_clock. Returns false for an unknown rate.
 */
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				  struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const struct icl_combo_pll_params *params =
		dev_priv->dpll.ref_clks.nssc == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	/*
	 * Both tables have the same number of entries, so sizing the loop
	 * by the 24 MHz table is correct for either selection.
	 */
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return true;
		}
	}

	MISSING_CASE(clock);
	return false;
}
2522 
/*
 * Select the pre-computed TBT PLL parameters based on display version
 * and reference clock. An unexpected reference clock falls back to the
 * 19.2 MHz values. Always succeeds.
 */
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			     struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (DISPLAY_VER(dev_priv) >= 12) {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return true;
}
2558 
/* TBT PLL frequency readout: intentionally unsupported, returns 0. */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2571 
2572 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2573 {
2574 	int ref_clock = i915->dpll.ref_clks.nssc;
2575 
2576 	/*
2577 	 * For ICL+, the spec states: if reference frequency is 38.4,
2578 	 * use 19.2 because the DPLL automatically divides that by 2.
2579 	 */
2580 	if (ref_clock == 38400)
2581 		ref_clock = 19200;
2582 
2583 	return ref_clock;
2584 }
2585 
/*
 * Compute WRPLL parameters for the port clock by picking the candidate
 * divider whose resulting DCO frequency lies closest to the middle of
 * the valid DCO range. Returns false if no divider yields a valid DCO
 * frequency.
 */
static bool
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return false;

	/* split the winning divider into P/Q/K and encode everything */
	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return true;
}
2629 
/*
 * Calculate the port clock generated by a combo PHY PLL from the saved
 * cfgcr0/cfgcr1 register state, undoing the WA #22010492432 halving of
 * the DCO fraction where applicable.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only takes effect when qdiv_mode is enabled. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* decode the register encodings back to logical divider values */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo the WA halving applied when the state was programmed */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* the fraction is a 15-bit fixed point value */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2690 
/*
 * Encode the computed WRPLL parameters into the cfgcr0/cfgcr1 register
 * values in @pll_state, applying WA #22010492432 (halved DCO fraction)
 * where needed.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
2715 
2716 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2717 				     u32 *target_dco_khz,
2718 				     struct intel_dpll_hw_state *state,
2719 				     bool is_dkl)
2720 {
2721 	u32 dco_min_freq, dco_max_freq;
2722 	int div1_vals[] = {7, 5, 3, 2};
2723 	unsigned int i;
2724 	int div2;
2725 
2726 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2727 	dco_max_freq = is_dp ? 8100000 : 10000000;
2728 
2729 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2730 		int div1 = div1_vals[i];
2731 
2732 		for (div2 = 10; div2 > 0; div2--) {
2733 			int dco = div1 * div2 * clock_khz * 5;
2734 			int a_divratio, tlinedrv, inputsel;
2735 			u32 hsdiv;
2736 
2737 			if (dco < dco_min_freq || dco > dco_max_freq)
2738 				continue;
2739 
2740 			if (div2 >= 2) {
2741 				/*
2742 				 * Note: a_divratio not matching TGL BSpec
2743 				 * algorithm but matching hardcoded values and
2744 				 * working on HW for DP alt-mode at least
2745 				 */
2746 				a_divratio = is_dp ? 10 : 5;
2747 				tlinedrv = is_dkl ? 1 : 2;
2748 			} else {
2749 				a_divratio = 5;
2750 				tlinedrv = 0;
2751 			}
2752 			inputsel = is_dp ? 0 : 1;
2753 
2754 			switch (div1) {
2755 			default:
2756 				MISSING_CASE(div1);
2757 				fallthrough;
2758 			case 2:
2759 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2760 				break;
2761 			case 3:
2762 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2763 				break;
2764 			case 5:
2765 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2766 				break;
2767 			case 7:
2768 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2769 				break;
2770 			}
2771 
2772 			*target_dco_khz = dco;
2773 
2774 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2775 
2776 			state->mg_clktop2_coreclkctl1 =
2777 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2778 
2779 			state->mg_clktop2_hsclkctl =
2780 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2781 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2782 				hsdiv |
2783 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2784 
2785 			return true;
2786 		}
2787 	}
2788 
2789 	return false;
2790 }
2791 
2792 /*
2793  * The specification for this function uses real numbers, so the math had to be
2794  * adapted to integer-only calculation, that's why it looks so different.
2795  */
2796 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2797 				  struct intel_dpll_hw_state *pll_state)
2798 {
2799 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2800 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
2801 	int clock = crtc_state->port_clock;
2802 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2803 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2804 	u32 prop_coeff, int_coeff;
2805 	u32 tdc_targetcnt, feedfwgain;
2806 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2807 	u64 tmp;
2808 	bool use_ssc = false;
2809 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2810 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2811 
2812 	memset(pll_state, 0, sizeof(*pll_state));
2813 
2814 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2815 				      pll_state, is_dkl)) {
2816 		drm_dbg_kms(&dev_priv->drm,
2817 			    "Failed to find divisors for clock %d\n", clock);
2818 		return false;
2819 	}
2820 
2821 	m1div = 2;
2822 	m2div_int = dco_khz / (refclk_khz * m1div);
2823 	if (m2div_int > 255) {
2824 		if (!is_dkl) {
2825 			m1div = 4;
2826 			m2div_int = dco_khz / (refclk_khz * m1div);
2827 		}
2828 
2829 		if (m2div_int > 255) {
2830 			drm_dbg_kms(&dev_priv->drm,
2831 				    "Failed to find mdiv for clock %d\n",
2832 				    clock);
2833 			return false;
2834 		}
2835 	}
2836 	m2div_rem = dco_khz % (refclk_khz * m1div);
2837 
2838 	tmp = (u64)m2div_rem * (1 << 22);
2839 	do_div(tmp, refclk_khz * m1div);
2840 	m2div_frac = tmp;
2841 
2842 	switch (refclk_khz) {
2843 	case 19200:
2844 		iref_ndiv = 1;
2845 		iref_trim = 28;
2846 		iref_pulse_w = 1;
2847 		break;
2848 	case 24000:
2849 		iref_ndiv = 1;
2850 		iref_trim = 25;
2851 		iref_pulse_w = 2;
2852 		break;
2853 	case 38400:
2854 		iref_ndiv = 2;
2855 		iref_trim = 28;
2856 		iref_pulse_w = 1;
2857 		break;
2858 	default:
2859 		MISSING_CASE(refclk_khz);
2860 		return false;
2861 	}
2862 
2863 	/*
2864 	 * tdc_res = 0.000003
2865 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2866 	 *
2867 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2868 	 * was supposed to be a division, but we rearranged the operations of
2869 	 * the formula to avoid early divisions so we don't multiply the
2870 	 * rounding errors.
2871 	 *
2872 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2873 	 * we also rearrange to work with integers.
2874 	 *
2875 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2876 	 * last division by 10.
2877 	 */
2878 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2879 
2880 	/*
2881 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2882 	 * 32 bits. That's not a problem since we round the division down
2883 	 * anyway.
2884 	 */
2885 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2886 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2887 
2888 	if (dco_khz >= 9000000) {
2889 		prop_coeff = 5;
2890 		int_coeff = 10;
2891 	} else {
2892 		prop_coeff = 4;
2893 		int_coeff = 8;
2894 	}
2895 
2896 	if (use_ssc) {
2897 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2898 		do_div(tmp, refclk_khz * m1div * 10000);
2899 		ssc_stepsize = tmp;
2900 
2901 		tmp = mul_u32_u32(dco_khz, 1000);
2902 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2903 	} else {
2904 		ssc_stepsize = 0;
2905 		ssc_steplen = 0;
2906 	}
2907 	ssc_steplog = 4;
2908 
2909 	/* write pll_state calculations */
2910 	if (is_dkl) {
2911 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2912 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2913 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2914 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2915 
2916 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2917 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2918 
2919 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2920 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2921 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2922 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2923 
2924 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2925 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2926 
2927 		pll_state->mg_pll_tdc_coldst_bias =
2928 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2929 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2930 
2931 	} else {
2932 		pll_state->mg_pll_div0 =
2933 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2934 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2935 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2936 
2937 		pll_state->mg_pll_div1 =
2938 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2939 			MG_PLL_DIV1_DITHER_DIV_2 |
2940 			MG_PLL_DIV1_NDIVRATIO(1) |
2941 			MG_PLL_DIV1_FBPREDIV(m1div);
2942 
2943 		pll_state->mg_pll_lf =
2944 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2945 			MG_PLL_LF_AFCCNTSEL_512 |
2946 			MG_PLL_LF_GAINCTRL(1) |
2947 			MG_PLL_LF_INT_COEFF(int_coeff) |
2948 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2949 
2950 		pll_state->mg_pll_frac_lock =
2951 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2952 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2953 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2954 			MG_PLL_FRAC_LOCK_DCODITHEREN |
2955 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2956 		if (use_ssc || m2div_rem > 0)
2957 			pll_state->mg_pll_frac_lock |=
2958 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2959 
2960 		pll_state->mg_pll_ssc =
2961 			(use_ssc ? MG_PLL_SSC_EN : 0) |
2962 			MG_PLL_SSC_TYPE(2) |
2963 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2964 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
2965 			MG_PLL_SSC_FLLEN |
2966 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2967 
2968 		pll_state->mg_pll_tdc_coldst_bias =
2969 			MG_PLL_TDC_COLDST_COLDSTART |
2970 			MG_PLL_TDC_COLDST_IREFINT_EN |
2971 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2972 			MG_PLL_TDC_TDCOVCCORR_EN |
2973 			MG_PLL_TDC_TDCSEL(3);
2974 
2975 		pll_state->mg_pll_bias =
2976 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
2977 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2978 			MG_PLL_BIAS_BIAS_BONUS(10) |
2979 			MG_PLL_BIAS_BIASCAL_EN |
2980 			MG_PLL_BIAS_CTRIM(12) |
2981 			MG_PLL_BIAS_VREF_RDAC(4) |
2982 			MG_PLL_BIAS_IREFTRIM(iref_trim);
2983 
2984 		if (refclk_khz == 38400) {
2985 			pll_state->mg_pll_tdc_coldst_bias_mask =
2986 				MG_PLL_TDC_COLDST_COLDSTART;
2987 			pll_state->mg_pll_bias_mask = 0;
2988 		} else {
2989 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2990 			pll_state->mg_pll_bias_mask = -1U;
2991 		}
2992 
2993 		pll_state->mg_pll_tdc_coldst_bias &=
2994 			pll_state->mg_pll_tdc_coldst_bias_mask;
2995 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2996 	}
2997 
2998 	return true;
2999 }
3000 
/*
 * Calculate the port clock generated by an MG (ICL) or Dekel (TGL+) PHY
 * PLL from the saved register state. The two PHY generations store the
 * feedback dividers in different registers/fields.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	if (DISPLAY_VER(dev_priv) >= 12) {
		/* Dekel PHY register layout */
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		/* MG PHY register layout */
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3072 
3073 /**
3074  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3075  * @crtc_state: state for the CRTC to select the DPLL for
3076  * @port_dpll_id: the active @port_dpll_id to select
3077  *
3078  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3079  * CRTC.
3080  */
3081 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3082 			      enum icl_port_dpll_id port_dpll_id)
3083 {
3084 	struct icl_port_dpll *port_dpll =
3085 		&crtc_state->icl_port_dplls[port_dpll_id];
3086 
3087 	crtc_state->shared_dpll = port_dpll->pll;
3088 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3089 }
3090 
3091 static void icl_update_active_dpll(struct intel_atomic_state *state,
3092 				   struct intel_crtc *crtc,
3093 				   struct intel_encoder *encoder)
3094 {
3095 	struct intel_crtc_state *crtc_state =
3096 		intel_atomic_get_new_crtc_state(state, crtc);
3097 	struct intel_digital_port *primary_port;
3098 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3099 
3100 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3101 		enc_to_mst(encoder)->primary :
3102 		enc_to_dig_port(encoder);
3103 
3104 	if (primary_port &&
3105 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3106 	     intel_tc_port_in_legacy_mode(primary_port)))
3107 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3108 
3109 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3110 }
3111 
3112 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3113 {
3114 	if (!(i915->hti_state & HDPORT_ENABLED))
3115 		return 0;
3116 
3117 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3118 }
3119 
/*
 * Reserve a combo PHY PLL for @crtc/@encoder: compute the desired PLL
 * parameters (WRPLL algorithm for HDMI/DSI, fixed tables for DP), then
 * find and reference a matching PLL from the platform-specific pool.
 *
 * On success the PLL is stored in the CRTC's ICL_PORT_DPLL_DEFAULT slot
 * and made the active one. Returns false if no PLL state could be
 * computed or no free PLL was found.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* Select the candidate PLL pool for this platform (and port). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* DG1 splits its four PLLs between two pairs of ports. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3199 
/*
 * Reserve the PLLs needed by a Type-C PHY port: both the TBT PLL (stored
 * in the ICL_PORT_DPLL_DEFAULT slot) and the port-specific MG/Dekel PHY
 * PLL (ICL_PORT_DPLL_MG_PHY slot). Both are reserved up front;
 * icl_update_active_dpll() then selects which one is actually used.
 *
 * Returns true on success. On failure the TBT PLL reference taken here
 * is dropped again via the error path.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First reserve the TBT PLL (shared by all TC ports). */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then reserve the MG/Dekel PHY PLL dedicated to this TC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Undo the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3260 
3261 static bool icl_get_dplls(struct intel_atomic_state *state,
3262 			  struct intel_crtc *crtc,
3263 			  struct intel_encoder *encoder)
3264 {
3265 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3266 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3267 
3268 	if (intel_phy_is_combo(dev_priv, phy))
3269 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3270 	else if (intel_phy_is_tc(dev_priv, phy))
3271 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3272 
3273 	MISSING_CASE(phy);
3274 
3275 	return false;
3276 }
3277 
3278 static void icl_put_dplls(struct intel_atomic_state *state,
3279 			  struct intel_crtc *crtc)
3280 {
3281 	const struct intel_crtc_state *old_crtc_state =
3282 		intel_atomic_get_old_crtc_state(state, crtc);
3283 	struct intel_crtc_state *new_crtc_state =
3284 		intel_atomic_get_new_crtc_state(state, crtc);
3285 	enum icl_port_dpll_id id;
3286 
3287 	new_crtc_state->shared_dpll = NULL;
3288 
3289 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3290 		const struct icl_port_dpll *old_port_dpll =
3291 			&old_crtc_state->icl_port_dplls[id];
3292 		struct icl_port_dpll *new_port_dpll =
3293 			&new_crtc_state->icl_port_dplls[id];
3294 
3295 		new_port_dpll->pll = NULL;
3296 
3297 		if (!old_port_dpll->pll)
3298 			continue;
3299 
3300 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3301 	}
3302 }
3303 
/*
 * Read out the HW state of an MG PHY PLL (ICL TC ports) into @hw_state.
 * Reserved register bits are masked out so the result is directly
 * comparable with the SW state computed by icl_calc_mg_pll_state().
 *
 * Returns true if the PLL is enabled and its state was read out.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* Registers are only accessible while the display core is powered. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Mirror the refclk-dependent RMW masks set up in icl_calc_mg_pll_state(). */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3369 
/*
 * Read out the HW state of a Dekel PHY PLL (TGL+ TC ports) into
 * @hw_state, keeping only the fields that dkl_pll_write() programs.
 *
 * Returns true if the PLL is enabled and its state was read out.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible while the display core is powered. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3443 
/*
 * Common readout for combo/TBT PLLs: if the PLL controlled by
 * @enable_reg is enabled, read CFGCR0/CFGCR1 from the platform-specific
 * register location into @hw_state.
 *
 * Returns true if the PLL is enabled and its state was read out.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible while the display core is powered. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR register locations differ per platform generation. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* On EHL/JSL, DPLL4 reuses the register slot at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3498 
3499 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3500 				   struct intel_shared_dpll *pll,
3501 				   struct intel_dpll_hw_state *hw_state)
3502 {
3503 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3504 
3505 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3506 }
3507 
/* TBT PLL readout: same as common, with the fixed TBT enable register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3514 
/*
 * Program a combo/TBT PLL's CFGCR0/CFGCR1 registers from the cached SW
 * state. The register location varies per platform (and EHL/JSL DPLL4
 * uses the register slot at index 4). A posting read flushes the writes.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3548 
/* Program an MG PHY PLL (ICL TC ports) from the cached SW state. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver; plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush all the writes above before the PLL is enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3599 
/* Program a Dekel PHY PLL (TGL+ TC ports) from the cached SW state. */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush all the writes above before the PLL is enabled. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3669 
/* Set PLL_POWER_ENABLE in @enable_reg and wait for the power-state ack. */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	u32 val;

	val = intel_de_read(dev_priv, enable_reg);
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3688 
/* Set PLL_ENABLE in @enable_reg and wait for the PLL to report lock. */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	u32 val;

	val = intel_de_read(dev_priv, enable_reg);
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
3703 
/* Wa_16011069516: disable CMTG clock gating on ADL-P A0, DPLL0 only. */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* The double read is intentional; see the comment above. */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3728 
/*
 * Full enable sequence for a combo PHY PLL: power up, program the CFGCR
 * registers, then enable and wait for lock. EHL/JSL DPLL4 additionally
 * takes a power reference to keep DC states disabled while it runs.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3762 
/* Full enable sequence for the TBT PLL: power up, program, enable. */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3780 
/*
 * Full enable sequence for a TC port PLL: power up, program the
 * generation-specific PHY registers (Dekel on ver >= 12, MG otherwise),
 * then enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3803 
/*
 * Common disable sequence: clear PLL_ENABLE and wait for the lock bit to
 * drop, then clear PLL_POWER_ENABLE and wait for the power-state ack.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3840 
/*
 * Disable a combo PHY PLL. For EHL/JSL DPLL4 also release the DC-off
 * power reference taken in combo_pll_enable().
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
					pll->wakeref);
}
3853 
/* Disable the TBT PLL using the common ICL sequence. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3859 
/* Disable an MG/Dekel TC PLL using the common ICL sequence. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3867 
/* Cache the PLL reference clock, which tracks the cdclk reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3873 
3874 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3875 			      const struct intel_dpll_hw_state *hw_state)
3876 {
3877 	drm_dbg_kms(&dev_priv->drm,
3878 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3879 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3880 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3881 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3882 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3883 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3884 		    hw_state->cfgcr0, hw_state->cfgcr1,
3885 		    hw_state->mg_refclkin_ctl,
3886 		    hw_state->mg_clktop2_coreclkctl1,
3887 		    hw_state->mg_clktop2_hsclkctl,
3888 		    hw_state->mg_pll_div0,
3889 		    hw_state->mg_pll_div1,
3890 		    hw_state->mg_pll_lf,
3891 		    hw_state->mg_pll_frac_lock,
3892 		    hw_state->mg_pll_ssc,
3893 		    hw_state->mg_pll_bias,
3894 		    hw_state->mg_pll_tdc_coldst_bias);
3895 }
3896 
/* PLL ops for combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
3903 
/* PLL ops for the TBT PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
3910 
/* PLL ops for MG PHY TC PLLs (ICL). */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3917 
/* PLLs available on ICL: two combo DPLLs, the TBT PLL and four MG PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
3928 
/* PLL manager selected for display version 11 parts (ICL) in intel_shared_dpll_init(). */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3937 
3938 static const struct dpll_info ehl_plls[] = {
3939 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3940 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3941 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
3942 	{ },
3943 };
3944 
/* PLL manager for JSL/EHL; no .update_active_dpll hook is provided here. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3952 
/*
 * Hooks for the TGL/ADL-P Type-C "TC PLL" entries: enable/disable are
 * shared with the MG PLLs, only the hw state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3959 
3960 static const struct dpll_info tgl_plls[] = {
3961 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
3962 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
3963 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3964 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3965 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3966 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3967 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
3968 	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
3969 	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
3970 	{ },
3971 };
3972 
/* PLL manager selected for display version >= 12 (TGL and derivatives). */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3981 
3982 static const struct dpll_info rkl_plls[] = {
3983 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3984 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3985 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
3986 	{ },
3987 };
3988 
/* PLL manager for Rocket Lake; no .update_active_dpll hook is provided here. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3996 
3997 static const struct dpll_info dg1_plls[] = {
3998 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
3999 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4000 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4001 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4002 	{ },
4003 };
4004 
/* PLL manager for DG1; no .update_active_dpll hook is provided here. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4012 
4013 static const struct dpll_info adls_plls[] = {
4014 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4015 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4016 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4017 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4018 	{ },
4019 };
4020 
/* PLL manager for Alder Lake S; no .update_active_dpll hook is provided here. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4028 
4029 static const struct dpll_info adlp_plls[] = {
4030 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4031 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4032 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4033 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4034 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4035 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4036 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4037 	{ },
4038 };
4039 
/* PLL manager for Alder Lake P (has Type-C ports, so .update_active_dpll is set). */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4048 
4049 /**
4050  * intel_shared_dpll_init - Initialize shared DPLLs
4051  * @dev: drm device
4052  *
4053  * Initialize shared DPLLs for @dev.
4054  */
4055 void intel_shared_dpll_init(struct drm_device *dev)
4056 {
4057 	struct drm_i915_private *dev_priv = to_i915(dev);
4058 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4059 	const struct dpll_info *dpll_info;
4060 	int i;
4061 
4062 	if (IS_DG2(dev_priv))
4063 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4064 		dpll_mgr = NULL;
4065 	else if (IS_ALDERLAKE_P(dev_priv))
4066 		dpll_mgr = &adlp_pll_mgr;
4067 	else if (IS_ALDERLAKE_S(dev_priv))
4068 		dpll_mgr = &adls_pll_mgr;
4069 	else if (IS_DG1(dev_priv))
4070 		dpll_mgr = &dg1_pll_mgr;
4071 	else if (IS_ROCKETLAKE(dev_priv))
4072 		dpll_mgr = &rkl_pll_mgr;
4073 	else if (DISPLAY_VER(dev_priv) >= 12)
4074 		dpll_mgr = &tgl_pll_mgr;
4075 	else if (IS_JSL_EHL(dev_priv))
4076 		dpll_mgr = &ehl_pll_mgr;
4077 	else if (DISPLAY_VER(dev_priv) >= 11)
4078 		dpll_mgr = &icl_pll_mgr;
4079 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4080 		dpll_mgr = &bxt_pll_mgr;
4081 	else if (DISPLAY_VER(dev_priv) == 9)
4082 		dpll_mgr = &skl_pll_mgr;
4083 	else if (HAS_DDI(dev_priv))
4084 		dpll_mgr = &hsw_pll_mgr;
4085 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4086 		dpll_mgr = &pch_pll_mgr;
4087 
4088 	if (!dpll_mgr) {
4089 		dev_priv->dpll.num_shared_dpll = 0;
4090 		return;
4091 	}
4092 
4093 	dpll_info = dpll_mgr->dpll_info;
4094 
4095 	for (i = 0; dpll_info[i].name; i++) {
4096 		drm_WARN_ON(dev, i != dpll_info[i].id);
4097 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4098 	}
4099 
4100 	dev_priv->dpll.mgr = dpll_mgr;
4101 	dev_priv->dpll.num_shared_dpll = i;
4102 	mutex_init(&dev_priv->dpll.lock);
4103 
4104 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4105 }
4106 
4107 /**
4108  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4109  * @state: atomic state
4110  * @crtc: CRTC to reserve DPLLs for
4111  * @encoder: encoder
4112  *
4113  * This function reserves all required DPLLs for the given CRTC and encoder
4114  * combination in the current atomic commit @state and the new @crtc atomic
4115  * state.
4116  *
4117  * The new configuration in the atomic commit @state is made effective by
4118  * calling intel_shared_dpll_swap_state().
4119  *
4120  * The reserved DPLLs should be released by calling
4121  * intel_release_shared_dplls().
4122  *
4123  * Returns:
4124  * True if all required DPLLs were successfully reserved.
4125  */
4126 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4127 				struct intel_crtc *crtc,
4128 				struct intel_encoder *encoder)
4129 {
4130 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4131 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4132 
4133 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4134 		return false;
4135 
4136 	return dpll_mgr->get_dplls(state, crtc, encoder);
4137 }
4138 
4139 /**
4140  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4141  * @state: atomic state
4142  * @crtc: crtc from which the DPLLs are to be released
4143  *
4144  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4145  * from the current atomic commit @state and the old @crtc atomic state.
4146  *
4147  * The new configuration in the atomic commit @state is made effective by
4148  * calling intel_shared_dpll_swap_state().
4149  */
4150 void intel_release_shared_dplls(struct intel_atomic_state *state,
4151 				struct intel_crtc *crtc)
4152 {
4153 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4154 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4155 
4156 	/*
4157 	 * FIXME: this function is called for every platform having a
4158 	 * compute_clock hook, even though the platform doesn't yet support
4159 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4160 	 * called on those.
4161 	 */
4162 	if (!dpll_mgr)
4163 		return;
4164 
4165 	dpll_mgr->put_dplls(state, crtc);
4166 }
4167 
4168 /**
4169  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4170  * @state: atomic state
4171  * @crtc: the CRTC for which to update the active DPLL
4172  * @encoder: encoder determining the type of port DPLL
4173  *
4174  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4175  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4176  * DPLL selected will be based on the current mode of the encoder's port.
4177  */
4178 void intel_update_active_dpll(struct intel_atomic_state *state,
4179 			      struct intel_crtc *crtc,
4180 			      struct intel_encoder *encoder)
4181 {
4182 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4183 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4184 
4185 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4186 		return;
4187 
4188 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4189 }
4190 
4191 /**
4192  * intel_dpll_get_freq - calculate the DPLL's output frequency
4193  * @i915: i915 device
4194  * @pll: DPLL for which to calculate the output frequency
4195  * @pll_state: DPLL state from which to calculate the output frequency
4196  *
4197  * Return the output frequency corresponding to @pll's passed in @pll_state.
4198  */
4199 int intel_dpll_get_freq(struct drm_i915_private *i915,
4200 			const struct intel_shared_dpll *pll,
4201 			const struct intel_dpll_hw_state *pll_state)
4202 {
4203 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4204 		return 0;
4205 
4206 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4207 }
4208 
4209 /**
4210  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4211  * @i915: i915 device
4212  * @pll: DPLL for which to calculate the output frequency
4213  * @hw_state: DPLL's hardware state
4214  *
4215  * Read out @pll's hardware state into @hw_state.
4216  */
4217 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4218 			     struct intel_shared_dpll *pll,
4219 			     struct intel_dpll_hw_state *hw_state)
4220 {
4221 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4222 }
4223 
/*
 * Read back @pll's hardware state and reconstruct the software tracking:
 * on/off status, and the mask of pipes currently driven by this PLL.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * An enabled DPLL4 on JSL/EHL holds a POWER_DOMAIN_DPLL_DC_OFF
	 * reference; take it here so the software refcount matches the
	 * hardware state found at readout time.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	/* Rebuild the pipe mask from the active CRTCs using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4251 
4252 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4253 {
4254 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4255 		i915->dpll.mgr->update_ref_clks(i915);
4256 }
4257 
4258 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4259 {
4260 	int i;
4261 
4262 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4263 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4264 }
4265 
/*
 * Sanitize @pll after hardware readout: apply the ADL-P CMTG clock gating
 * workaround to any enabled PLL, then disable the PLL if no pipe uses it.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* The workaround applies to every enabled PLL, in use or not. */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	/* Enabled in hardware but unused by any pipe: turn it off. */
	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4284 
4285 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4286 {
4287 	int i;
4288 
4289 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4290 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4291 }
4292 
4293 /**
4294  * intel_dpll_dump_hw_state - write hw_state to dmesg
4295  * @dev_priv: i915 drm device
4296  * @hw_state: hw state to be written to the log
4297  *
4298  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4299  */
4300 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4301 			      const struct intel_dpll_hw_state *hw_state)
4302 {
4303 	if (dev_priv->dpll.mgr) {
4304 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4305 	} else {
4306 		/* fallback for platforms that don't use the shared dpll
4307 		 * infrastructure
4308 		 */
4309 		drm_dbg_kms(&dev_priv->drm,
4310 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4311 			    "fp0: 0x%x, fp1: 0x%x\n",
4312 			    hw_state->dpll,
4313 			    hw_state->dpll_md,
4314 			    hw_state->fp0,
4315 			    hw_state->fp1);
4316 	}
4317 }
4318