1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/math.h>
25 #include <linux/string_helpers.h>
26 
27 #include "i915_reg.h"
28 #include "intel_de.h"
29 #include "intel_display_types.h"
30 #include "intel_dkl_phy.h"
31 #include "intel_dkl_phy_regs.h"
32 #include "intel_dpio_phy.h"
33 #include "intel_dpll.h"
34 #include "intel_dpll_mgr.h"
35 #include "intel_hti.h"
36 #include "intel_mg_phy_regs.h"
37 #include "intel_pch_refclk.h"
38 #include "intel_tc.h"
39 
40 /**
41  * DOC: Display PLLs
42  *
43  * Display PLLs used for driving outputs vary by platform. While some have
44  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
45  * from a pool. In the latter scenario, it is possible that multiple pipes
46  * share a PLL if their configurations match.
47  *
48  * This file provides an abstraction over display PLLs. The function
49  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
50  * users of a PLL are tracked and that tracking is integrated with the atomic
51  * modset interface. During an atomic operation, required PLLs can be reserved
52  * for a given CRTC and encoder configuration by calling
53  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
54  * with intel_release_shared_dplls().
55  * Changes to the users are first staged in the atomic state, and then made
56  * effective by calling intel_shared_dpll_swap_state() during the atomic
57  * commit phase.
58  */
59 
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true when the pll is
	 * enabled (see e.g. ibx_pch_dpll_get_hw_state()).
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. The value is in kHz for the implementations visible here
	 * (hsw_ddi_wrpll_get_freq(), hsw_ddi_lcpll_get_freq()).
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
94 
/*
 * Platform specific vtable for managing the pool of shared DPLLs:
 * computing, reserving and releasing PLLs for a CRTC during an atomic
 * modeset, plus debug helpers.
 */
struct intel_dpll_mgr {
	/* PLL descriptor table; terminated by an empty entry (see pch_plls) */
	const struct dpll_info *dpll_info;

	/* compute the PLL state needed for @crtc/@encoder; 0 or -errno */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve PLL(s) for @crtc/@encoder in @state; 0 or -errno */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* switch @crtc's active PLL tracking; platform specific — TODO confirm semantics */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* presumably refreshes the cached display.dpll.ref_clks values — verify */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
113 
114 static void
115 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
116 				  struct intel_shared_dpll_state *shared_dpll)
117 {
118 	enum intel_dpll_id i;
119 
120 	/* Copy shared dpll state */
121 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
122 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
123 
124 		shared_dpll[i] = pll->state;
125 	}
126 }
127 
128 static struct intel_shared_dpll_state *
129 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
130 {
131 	struct intel_atomic_state *state = to_intel_atomic_state(s);
132 
133 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
134 
135 	if (!state->dpll_set) {
136 		state->dpll_set = true;
137 
138 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
139 						  state->shared_dpll);
140 	}
141 
142 	return state->shared_dpll;
143 }
144 
145 /**
146  * intel_get_shared_dpll_by_id - get a DPLL given its id
147  * @dev_priv: i915 device instance
148  * @id: pll id
149  *
150  * Returns:
151  * A pointer to the DPLL with @id
152  */
153 struct intel_shared_dpll *
154 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
155 			    enum intel_dpll_id id)
156 {
157 	return &dev_priv->display.dpll.shared_dplls[id];
158 }
159 
160 /* For ILK+ */
161 void assert_shared_dpll(struct drm_i915_private *dev_priv,
162 			struct intel_shared_dpll *pll,
163 			bool state)
164 {
165 	bool cur_state;
166 	struct intel_dpll_hw_state hw_state;
167 
168 	if (drm_WARN(&dev_priv->drm, !pll,
169 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
170 		return;
171 
172 	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
173 	I915_STATE_WARN(dev_priv, cur_state != state,
174 			"%s assertion failure (expected %s, current %s)\n",
175 			pll->info->name, str_on_off(state),
176 			str_on_off(cur_state));
177 }
178 
179 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
180 {
181 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
182 }
183 
184 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
185 {
186 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
187 }
188 
189 static i915_reg_t
190 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
191 			   struct intel_shared_dpll *pll)
192 {
193 	if (IS_DG1(i915))
194 		return DG1_DPLL_ENABLE(pll->info->id);
195 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
196 		return MG_PLL_ENABLE(0);
197 
198 	return ICL_DPLL_ENABLE(pll->info->id);
199 }
200 
201 static i915_reg_t
202 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
203 			struct intel_shared_dpll *pll)
204 {
205 	const enum intel_dpll_id id = pll->info->id;
206 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
207 
208 	if (IS_ALDERLAKE_P(i915))
209 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
210 
211 	return MG_PLL_ENABLE(tc_port);
212 }
213 
214 /**
215  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
216  * @crtc_state: CRTC, and its state, which has a shared DPLL
217  *
218  * Enable the shared DPLL used by @crtc.
219  */
220 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
221 {
222 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
223 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
224 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
225 	unsigned int pipe_mask = BIT(crtc->pipe);
226 	unsigned int old_mask;
227 
228 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
229 		return;
230 
231 	mutex_lock(&dev_priv->display.dpll.lock);
232 	old_mask = pll->active_mask;
233 
234 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
235 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
236 		goto out;
237 
238 	pll->active_mask |= pipe_mask;
239 
240 	drm_dbg_kms(&dev_priv->drm,
241 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
242 		    pll->info->name, pll->active_mask, pll->on,
243 		    crtc->base.base.id, crtc->base.name);
244 
245 	if (old_mask) {
246 		drm_WARN_ON(&dev_priv->drm, !pll->on);
247 		assert_shared_dpll_enabled(dev_priv, pll);
248 		goto out;
249 	}
250 	drm_WARN_ON(&dev_priv->drm, pll->on);
251 
252 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
253 	pll->info->funcs->enable(dev_priv, pll);
254 	pll->on = true;
255 
256 out:
257 	mutex_unlock(&dev_priv->display.dpll.lock);
258 }
259 
260 /**
261  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
262  * @crtc_state: CRTC, and its state, which has a shared DPLL
263  *
264  * Disable the shared DPLL used by @crtc.
265  */
266 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
267 {
268 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
269 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
270 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
271 	unsigned int pipe_mask = BIT(crtc->pipe);
272 
273 	/* PCH only available on ILK+ */
274 	if (DISPLAY_VER(dev_priv) < 5)
275 		return;
276 
277 	if (pll == NULL)
278 		return;
279 
280 	mutex_lock(&dev_priv->display.dpll.lock);
281 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
282 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
283 		     crtc->base.base.id, crtc->base.name))
284 		goto out;
285 
286 	drm_dbg_kms(&dev_priv->drm,
287 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
288 		    pll->info->name, pll->active_mask, pll->on,
289 		    crtc->base.base.id, crtc->base.name);
290 
291 	assert_shared_dpll_enabled(dev_priv, pll);
292 	drm_WARN_ON(&dev_priv->drm, !pll->on);
293 
294 	pll->active_mask &= ~pipe_mask;
295 	if (pll->active_mask)
296 		goto out;
297 
298 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
299 	pll->info->funcs->disable(dev_priv, pll);
300 	pll->on = false;
301 
302 out:
303 	mutex_unlock(&dev_priv->display.dpll.lock);
304 }
305 
306 static struct intel_shared_dpll *
307 intel_find_shared_dpll(struct intel_atomic_state *state,
308 		       const struct intel_crtc *crtc,
309 		       const struct intel_dpll_hw_state *pll_state,
310 		       unsigned long dpll_mask)
311 {
312 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
313 	struct intel_shared_dpll *pll, *unused_pll = NULL;
314 	struct intel_shared_dpll_state *shared_dpll;
315 	enum intel_dpll_id i;
316 
317 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
318 
319 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
320 
321 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
322 		pll = &dev_priv->display.dpll.shared_dplls[i];
323 
324 		/* Only want to check enabled timings first */
325 		if (shared_dpll[i].pipe_mask == 0) {
326 			if (!unused_pll)
327 				unused_pll = pll;
328 			continue;
329 		}
330 
331 		if (memcmp(pll_state,
332 			   &shared_dpll[i].hw_state,
333 			   sizeof(*pll_state)) == 0) {
334 			drm_dbg_kms(&dev_priv->drm,
335 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
336 				    crtc->base.base.id, crtc->base.name,
337 				    pll->info->name,
338 				    shared_dpll[i].pipe_mask,
339 				    pll->active_mask);
340 			return pll;
341 		}
342 	}
343 
344 	/* Ok no matching timings, maybe there's a free one? */
345 	if (unused_pll) {
346 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
347 			    crtc->base.base.id, crtc->base.name,
348 			    unused_pll->info->name);
349 		return unused_pll;
350 	}
351 
352 	return NULL;
353 }
354 
355 /**
356  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
357  * @crtc: CRTC on which behalf the reference is taken
358  * @pll: DPLL for which the reference is taken
359  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
360  *
361  * Take a reference for @pll tracking the use of it by @crtc.
362  */
363 static void
364 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
365 				 const struct intel_shared_dpll *pll,
366 				 struct intel_shared_dpll_state *shared_dpll_state)
367 {
368 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
369 
370 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
371 
372 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
373 
374 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
375 		    crtc->base.base.id, crtc->base.name, pll->info->name);
376 }
377 
378 static void
379 intel_reference_shared_dpll(struct intel_atomic_state *state,
380 			    const struct intel_crtc *crtc,
381 			    const struct intel_shared_dpll *pll,
382 			    const struct intel_dpll_hw_state *pll_state)
383 {
384 	struct intel_shared_dpll_state *shared_dpll;
385 	const enum intel_dpll_id id = pll->info->id;
386 
387 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
388 
389 	if (shared_dpll[id].pipe_mask == 0)
390 		shared_dpll[id].hw_state = *pll_state;
391 
392 	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
393 }
394 
395 /**
396  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
397  * @crtc: CRTC on which behalf the reference is dropped
398  * @pll: DPLL for which the reference is dropped
399  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
400  *
401  * Drop a reference for @pll tracking the end of use of it by @crtc.
402  */
403 void
404 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
405 				   const struct intel_shared_dpll *pll,
406 				   struct intel_shared_dpll_state *shared_dpll_state)
407 {
408 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
409 
410 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
411 
412 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
413 
414 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
415 		    crtc->base.base.id, crtc->base.name, pll->info->name);
416 }
417 
418 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
419 					  const struct intel_crtc *crtc,
420 					  const struct intel_shared_dpll *pll)
421 {
422 	struct intel_shared_dpll_state *shared_dpll;
423 	const enum intel_dpll_id id = pll->info->id;
424 
425 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
426 
427 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
428 }
429 
430 static void intel_put_dpll(struct intel_atomic_state *state,
431 			   struct intel_crtc *crtc)
432 {
433 	const struct intel_crtc_state *old_crtc_state =
434 		intel_atomic_get_old_crtc_state(state, crtc);
435 	struct intel_crtc_state *new_crtc_state =
436 		intel_atomic_get_new_crtc_state(state, crtc);
437 
438 	new_crtc_state->shared_dpll = NULL;
439 
440 	if (!old_crtc_state->shared_dpll)
441 		return;
442 
443 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
444 }
445 
446 /**
447  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
448  * @state: atomic state
449  *
450  * This is the dpll version of drm_atomic_helper_swap_state() since the
451  * helper does not handle driver-specific global state.
452  *
453  * For consistency with atomic helpers this function does a complete swap,
454  * i.e. it also puts the current state into @state, even though there is no
455  * need for that at this moment.
456  */
457 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
458 {
459 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
460 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
461 	enum intel_dpll_id i;
462 
463 	if (!state->dpll_set)
464 		return;
465 
466 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
467 		struct intel_shared_dpll *pll =
468 			&dev_priv->display.dpll.shared_dplls[i];
469 
470 		swap(pll->state, shared_dpll[i]);
471 	}
472 }
473 
474 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
475 				      struct intel_shared_dpll *pll,
476 				      struct intel_dpll_hw_state *hw_state)
477 {
478 	const enum intel_dpll_id id = pll->info->id;
479 	intel_wakeref_t wakeref;
480 	u32 val;
481 
482 	wakeref = intel_display_power_get_if_enabled(dev_priv,
483 						     POWER_DOMAIN_DISPLAY_CORE);
484 	if (!wakeref)
485 		return false;
486 
487 	val = intel_de_read(dev_priv, PCH_DPLL(id));
488 	hw_state->dpll = val;
489 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
490 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
491 
492 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
493 
494 	return val & DPLL_VCO_ENABLE;
495 }
496 
497 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
498 {
499 	u32 val;
500 	bool enabled;
501 
502 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
503 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
504 			    DREF_SUPERSPREAD_SOURCE_MASK));
505 	I915_STATE_WARN(dev_priv, !enabled,
506 			"PCH refclk assertion failure, should be active but is disabled\n");
507 }
508 
/*
 * Enable a PCH DPLL: program the FP dividers and DPLL control, then
 * rewrite the control register once the clocks are stable so the pixel
 * multiplier takes effect.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Program the FP dividers before turning the DPLL itself on. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
535 
/* Disable a PCH DPLL by clearing its whole control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* delay mirrors the enable path; presumably lets the clock settle */
	udelay(200);
}
545 
/* No extra PLL state to compute for the PCH DPLLs; always succeeds. */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
552 
553 static int ibx_get_dpll(struct intel_atomic_state *state,
554 			struct intel_crtc *crtc,
555 			struct intel_encoder *encoder)
556 {
557 	struct intel_crtc_state *crtc_state =
558 		intel_atomic_get_new_crtc_state(state, crtc);
559 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
560 	struct intel_shared_dpll *pll;
561 	enum intel_dpll_id i;
562 
563 	if (HAS_PCH_IBX(dev_priv)) {
564 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
565 		i = (enum intel_dpll_id) crtc->pipe;
566 		pll = &dev_priv->display.dpll.shared_dplls[i];
567 
568 		drm_dbg_kms(&dev_priv->drm,
569 			    "[CRTC:%d:%s] using pre-allocated %s\n",
570 			    crtc->base.base.id, crtc->base.name,
571 			    pll->info->name);
572 	} else {
573 		pll = intel_find_shared_dpll(state, crtc,
574 					     &crtc_state->dpll_hw_state,
575 					     BIT(DPLL_ID_PCH_PLL_B) |
576 					     BIT(DPLL_ID_PCH_PLL_A));
577 	}
578 
579 	if (!pll)
580 		return -EINVAL;
581 
582 	/* reference the pll */
583 	intel_reference_shared_dpll(state, crtc,
584 				    pll, &crtc_state->dpll_hw_state);
585 
586 	crtc_state->shared_dpll = pll;
587 
588 	return 0;
589 }
590 
/* Log the PCH DPLL register values tracked in @hw_state for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
602 
/* Platform hooks shared by both PCH DPLLs */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
608 
/* PCH DPLL descriptors: name, funcs, id, flags; empty-terminated */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
614 
/* DPLL manager for the PCH shared DPLL pool */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
622 
/* Program and enable a HSW WRPLL from the tracked state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* 20 us delay after enabling — presumably the PLL lock time */
	udelay(20);
}
632 
/* Program and enable the HSW SPLL from the tracked state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* 20 us delay after enabling — presumably the PLL lock time */
	udelay(20);
}
640 
/* Disable a HSW WRPLL by clearing only its enable bit. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
656 
/* Disable the HSW SPLL by clearing only its enable bit. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
672 
673 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
674 				       struct intel_shared_dpll *pll,
675 				       struct intel_dpll_hw_state *hw_state)
676 {
677 	const enum intel_dpll_id id = pll->info->id;
678 	intel_wakeref_t wakeref;
679 	u32 val;
680 
681 	wakeref = intel_display_power_get_if_enabled(dev_priv,
682 						     POWER_DOMAIN_DISPLAY_CORE);
683 	if (!wakeref)
684 		return false;
685 
686 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
687 	hw_state->wrpll = val;
688 
689 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
690 
691 	return val & WRPLL_PLL_ENABLE;
692 }
693 
694 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
695 				      struct intel_shared_dpll *pll,
696 				      struct intel_dpll_hw_state *hw_state)
697 {
698 	intel_wakeref_t wakeref;
699 	u32 val;
700 
701 	wakeref = intel_display_power_get_if_enabled(dev_priv,
702 						     POWER_DOMAIN_DISPLAY_CORE);
703 	if (!wakeref)
704 		return false;
705 
706 	val = intel_de_read(dev_priv, SPLL_CTL);
707 	hw_state->spll = val;
708 
709 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
710 
711 	return val & SPLL_PLL_ENABLE;
712 }
713 
/* LC PLL reference frequency — presumably MHz; TODO confirm units */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range: even values from 2 to 64 */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate (p, n2, r2) divider triple for the WRPLL search */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
730 
/*
 * Return the frequency-error budget (in ppm) allowed when searching for
 * WRPLL dividers for @clock (in Hz). Clocks not listed in any group
 * fall back to the default 1000 ppm budget.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		unsigned count;
		unsigned budget;
	} groups[] = {
		{ budget_0, sizeof(budget_0) / sizeof(budget_0[0]), 0 },
		{ budget_1500, sizeof(budget_1500) / sizeof(budget_1500[0]), 1500 },
		{ budget_2000, sizeof(budget_2000) / sizeof(budget_2000[0]), 2000 },
		{ budget_4000, sizeof(budget_4000) / sizeof(budget_4000[0]), 4000 },
		{ budget_5000, sizeof(budget_5000) / sizeof(budget_5000[0]), 5000 },
	};
	unsigned g, i;

	for (g = 0; g < sizeof(groups) / sizeof(groups[0]); g++) {
		for (i = 0; i < groups[g].count; i++) {
			if (groups[g].clocks[i] == clock)
				return groups[g].budget;
		}
	}

	/* Anything else gets the default budget. */
	return 1000;
}
794 
/*
 * Consider the candidate divider triple (r2, n2, p) for target frequency
 * @freq2k and record it in @best if it beats the current best candidate,
 * using @budget as the acceptable frequency error in ppm.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * All comparisons below are cross-multiplied to stay in integer
	 * arithmetic: "a < c" is "delta(candidate) > budget" and
	 * "b < d" is "delta(best) > budget".
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
854 
/*
 * Find WRPLL dividers for @clock. The outputs r2 and n2 are twice the
 * real R and N values (see the "Injecting R2 = 2 * R" notes below);
 * all (r2, n2, p) combinations within the REF/VCO constraints are tried
 * and the best one per hsw_wrpll_update_rnp() is returned.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* freq2k is the target frequency in units of 100 Hz. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
919 
/*
 * Calculate the WRPLL output frequency (in kHz) from the programmed
 * WRPLL_CTL value in @pll_state.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	/* Pick the reference clock according to the programmed mux. */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the control register value. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
959 
960 static int
961 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
962 			   struct intel_crtc *crtc)
963 {
964 	struct drm_i915_private *i915 = to_i915(state->base.dev);
965 	struct intel_crtc_state *crtc_state =
966 		intel_atomic_get_new_crtc_state(state, crtc);
967 	unsigned int p, n2, r2;
968 
969 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
970 
971 	crtc_state->dpll_hw_state.wrpll =
972 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
973 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
974 		WRPLL_DIVIDER_POST(p);
975 
976 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
977 							&crtc_state->dpll_hw_state);
978 
979 	return 0;
980 }
981 
982 static struct intel_shared_dpll *
983 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
984 		       struct intel_crtc *crtc)
985 {
986 	struct intel_crtc_state *crtc_state =
987 		intel_atomic_get_new_crtc_state(state, crtc);
988 
989 	return intel_find_shared_dpll(state, crtc,
990 				      &crtc_state->dpll_hw_state,
991 				      BIT(DPLL_ID_WRPLL2) |
992 				      BIT(DPLL_ID_WRPLL1));
993 }
994 
995 static int
996 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
997 {
998 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
999 	int clock = crtc_state->port_clock;
1000 
1001 	switch (clock / 2) {
1002 	case 81000:
1003 	case 135000:
1004 	case 270000:
1005 		return 0;
1006 	default:
1007 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
1008 			    clock);
1009 		return -EINVAL;
1010 	}
1011 }
1012 
1013 static struct intel_shared_dpll *
1014 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1015 {
1016 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1017 	struct intel_shared_dpll *pll;
1018 	enum intel_dpll_id pll_id;
1019 	int clock = crtc_state->port_clock;
1020 
1021 	switch (clock / 2) {
1022 	case 81000:
1023 		pll_id = DPLL_ID_LCPLL_810;
1024 		break;
1025 	case 135000:
1026 		pll_id = DPLL_ID_LCPLL_1350;
1027 		break;
1028 	case 270000:
1029 		pll_id = DPLL_ID_LCPLL_2700;
1030 		break;
1031 	default:
1032 		MISSING_CASE(clock / 2);
1033 		return NULL;
1034 	}
1035 
1036 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1037 
1038 	if (!pll)
1039 		return NULL;
1040 
1041 	return pll;
1042 }
1043 
1044 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1045 				  const struct intel_shared_dpll *pll,
1046 				  const struct intel_dpll_hw_state *pll_state)
1047 {
1048 	int link_clock = 0;
1049 
1050 	switch (pll->info->id) {
1051 	case DPLL_ID_LCPLL_810:
1052 		link_clock = 81000;
1053 		break;
1054 	case DPLL_ID_LCPLL_1350:
1055 		link_clock = 135000;
1056 		break;
1057 	case DPLL_ID_LCPLL_2700:
1058 		link_clock = 270000;
1059 		break;
1060 	default:
1061 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1062 		break;
1063 	}
1064 
1065 	return link_clock * 2;
1066 }
1067 
1068 static int
1069 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1070 			  struct intel_crtc *crtc)
1071 {
1072 	struct intel_crtc_state *crtc_state =
1073 		intel_atomic_get_new_crtc_state(state, crtc);
1074 
1075 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1076 		return -EINVAL;
1077 
1078 	crtc_state->dpll_hw_state.spll =
1079 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1080 
1081 	return 0;
1082 }
1083 
1084 static struct intel_shared_dpll *
1085 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1086 		      struct intel_crtc *crtc)
1087 {
1088 	struct intel_crtc_state *crtc_state =
1089 		intel_atomic_get_new_crtc_state(state, crtc);
1090 
1091 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1092 				      BIT(DPLL_ID_SPLL));
1093 }
1094 
1095 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1096 				 const struct intel_shared_dpll *pll,
1097 				 const struct intel_dpll_hw_state *pll_state)
1098 {
1099 	int link_clock = 0;
1100 
1101 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1102 	case SPLL_FREQ_810MHz:
1103 		link_clock = 81000;
1104 		break;
1105 	case SPLL_FREQ_1350MHz:
1106 		link_clock = 135000;
1107 		break;
1108 	case SPLL_FREQ_2700MHz:
1109 		link_clock = 270000;
1110 		break;
1111 	default:
1112 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1113 		break;
1114 	}
1115 
1116 	return link_clock * 2;
1117 }
1118 
1119 static int hsw_compute_dpll(struct intel_atomic_state *state,
1120 			    struct intel_crtc *crtc,
1121 			    struct intel_encoder *encoder)
1122 {
1123 	struct intel_crtc_state *crtc_state =
1124 		intel_atomic_get_new_crtc_state(state, crtc);
1125 
1126 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1127 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1128 	else if (intel_crtc_has_dp_encoder(crtc_state))
1129 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1130 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1131 		return hsw_ddi_spll_compute_dpll(state, crtc);
1132 	else
1133 		return -EINVAL;
1134 }
1135 
1136 static int hsw_get_dpll(struct intel_atomic_state *state,
1137 			struct intel_crtc *crtc,
1138 			struct intel_encoder *encoder)
1139 {
1140 	struct intel_crtc_state *crtc_state =
1141 		intel_atomic_get_new_crtc_state(state, crtc);
1142 	struct intel_shared_dpll *pll = NULL;
1143 
1144 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1145 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1146 	else if (intel_crtc_has_dp_encoder(crtc_state))
1147 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1148 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1149 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1150 
1151 	if (!pll)
1152 		return -EINVAL;
1153 
1154 	intel_reference_shared_dpll(state, crtc,
1155 				    pll, &crtc_state->dpll_hw_state);
1156 
1157 	crtc_state->shared_dpll = pll;
1158 
1159 	return 0;
1160 }
1161 
1162 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1163 {
1164 	i915->display.dpll.ref_clks.ssc = 135000;
1165 	/* Non-SSC is only used on non-ULT HSW. */
1166 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1167 		i915->display.dpll.ref_clks.nssc = 24000;
1168 	else
1169 		i915->display.dpll.ref_clks.nssc = 135000;
1170 }
1171 
/* Log the HSW PLL hardware state (WRPLL + SPLL control values). */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1178 
/* Operations for the two on-demand HSW/BDW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1185 
/* Operations for the single HSW/BDW SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1192 
/*
 * The LCPLLs are always on (see INTEL_DPLL_ALWAYS_ON in hsw_plls), so
 * enabling is intentionally a no-op.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1197 
/* Always-on LCPLLs are never disabled; intentionally a no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1202 
/*
 * The always-on LCPLLs have no per-PLL state to read back; report them
 * as enabled unconditionally.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1209 
/* Operations for the three fixed, always-on HSW/BDW LCPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1216 
/*
 * HSW/BDW PLL pool: two WRPLLs and one SPLL managed on demand, plus
 * three fixed-rate LCPLLs that are always on.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1226 
/* Top-level DPLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1235 
/* Control + config register set for one SKL DPLL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1239 
/* this array is indexed by the *shared* pll id (enum intel_dpll_id) */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1266 
/*
 * Program this PLL's slice of the shared DPLL_CTRL1 register (HDMI
 * mode, SSC and link rate fields); each PLL owns 6 bits at id * 6.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
		     pll->state.hw_state.ctrl1 << (id * 6));
	/* posting read to make sure the write has landed */
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1277 
/*
 * Enable a SKL DPLL (1-3): program CTRL1 and CFGCR1/2, set the enable
 * bit, then wait for the lock indication in DPLL_STATUS.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	/* posting reads so the configuration lands before enabling */
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1297 
/*
 * DPLL0 is enabled by the CDCLK code; only its CTRL1 slice needs to be
 * programmed here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1303 
/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1314 
/* DPLL0 must never be turned off here (it drives CDCLK); no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1319 
/*
 * Read back the hardware state of a SKL DPLL (1-3). Returns false when
 * the display power domain is off or the PLL is disabled; cfgcr1/2 are
 * only captured in HDMI mode to avoid stale values.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's 6 bit slice of DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1356 
/*
 * Read back DPLL0 state. Only the CTRL1 slice is captured; DPLL0 is
 * expected to always be enabled since it drives CDCLK.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1389 
/* Best-divider search state for skl_ddi_calculate_wrpll(). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/*
 * DCO freq must be within +1%/-6%  of the DCO central freq.
 * Deviation values are in 0.01% units (10000 * diff / central).
 */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1400 
1401 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1402 				  u64 central_freq,
1403 				  u64 dco_freq,
1404 				  unsigned int divider)
1405 {
1406 	u64 deviation;
1407 
1408 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1409 			      central_freq);
1410 
1411 	/* positive deviation */
1412 	if (dco_freq >= central_freq) {
1413 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1414 		    deviation < ctx->min_deviation) {
1415 			ctx->min_deviation = deviation;
1416 			ctx->central_freq = central_freq;
1417 			ctx->dco_freq = dco_freq;
1418 			ctx->p = divider;
1419 		}
1420 	/* negative deviation */
1421 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1422 		   deviation < ctx->min_deviation) {
1423 		ctx->min_deviation = deviation;
1424 		ctx->central_freq = central_freq;
1425 		ctx->dco_freq = dco_freq;
1426 		ctx->p = divider;
1427 	}
1428 }
1429 
/*
 * Factor the total divider @p into the three hardware multipliers
 * p0 (pdiv), p1 (qdiv) and p2 (kdiv) such that p = p0 * p1 * p2.
 * Outputs are left untouched for values of p outside the supported
 * divider lists; callers pre-initialize them.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* small even dividers map directly to p2 */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* otherwise pull the smallest factor of half into p0 */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1478 
/*
 * Encoded WRPLL settings as written to DPLL_CFGCR1/2 by
 * skl_ddi_hdmi_pll_dividers(); filled by skl_wrpll_params_populate().
 */
struct skl_wrpll_params {
	u32 dco_fraction;	/* fractional DCO ratio, 0x8000 scale */
	u32 dco_integer;	/* integer DCO ratio */
	u32 qdiv_ratio;		/* Q divider (p1) */
	u32 qdiv_mode;		/* 1 if the Q divider is active (p1 != 1) */
	u32 kdiv;		/* encoded K divider (p2) */
	u32 pdiv;		/* encoded P divider (p0) */
	u32 central_freq;	/* encoded DCO central freq selector */
};
1488 
/*
 * Encode the chosen DCO central frequency and dividers (p0 = pdiv,
 * p1 = qdiv, p2 = kdiv) into *params, and split the DCO ratio into the
 * integer part and a 15 bit (0x8000 scale) fractional part.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* encode the central frequency selector (only three legal values) */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1556 
1557 static int
1558 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1559 			int ref_clock,
1560 			struct skl_wrpll_params *wrpll_params)
1561 {
1562 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1563 						 9000000000ULL,
1564 						 9600000000ULL };
1565 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1566 					    24, 28, 30, 32, 36, 40, 42, 44,
1567 					    48, 52, 54, 56, 60, 64, 66, 68,
1568 					    70, 72, 76, 78, 80, 84, 88, 90,
1569 					    92, 96, 98 };
1570 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1571 	static const struct {
1572 		const u8 *list;
1573 		int n_dividers;
1574 	} dividers[] = {
1575 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1576 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1577 	};
1578 	struct skl_wrpll_context ctx = {
1579 		.min_deviation = U64_MAX,
1580 	};
1581 	unsigned int dco, d, i;
1582 	unsigned int p0, p1, p2;
1583 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1584 
1585 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1586 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1587 			for (i = 0; i < dividers[d].n_dividers; i++) {
1588 				unsigned int p = dividers[d].list[i];
1589 				u64 dco_freq = p * afe_clock;
1590 
1591 				skl_wrpll_try_divider(&ctx,
1592 						      dco_central_freq[dco],
1593 						      dco_freq,
1594 						      p);
1595 				/*
1596 				 * Skip the remaining dividers if we're sure to
1597 				 * have found the definitive divider, we can't
1598 				 * improve a 0 deviation.
1599 				 */
1600 				if (ctx.min_deviation == 0)
1601 					goto skip_remaining_dividers;
1602 			}
1603 		}
1604 
1605 skip_remaining_dividers:
1606 		/*
1607 		 * If a solution is found with an even divider, prefer
1608 		 * this one.
1609 		 */
1610 		if (d == 0 && ctx.p)
1611 			break;
1612 	}
1613 
1614 	if (!ctx.p)
1615 		return -EINVAL;
1616 
1617 	/*
1618 	 * gcc incorrectly analyses that these can be used without being
1619 	 * initialized. To be fair, it's hard to guess.
1620 	 */
1621 	p0 = p1 = p2 = 0;
1622 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1623 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1624 				  ctx.central_freq, p0, p1, p2);
1625 
1626 	return 0;
1627 }
1628 
/*
 * Recover the port clock from the WRPLL cfgcr1/2 contents by decoding
 * the P0/P1/P2 dividers and the DCO integer/fraction fields.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* the Q divider only applies when QDIV_MODE is set */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO = integer part plus 15 bit (0x8000 scale) fraction */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1699 
1700 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1701 {
1702 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1703 	struct skl_wrpll_params wrpll_params = {};
1704 	u32 ctrl1, cfgcr1, cfgcr2;
1705 	int ret;
1706 
1707 	/*
1708 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1709 	 * as the DPLL id in this function.
1710 	 */
1711 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1712 
1713 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1714 
1715 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1716 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1717 	if (ret)
1718 		return ret;
1719 
1720 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1721 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1722 		wrpll_params.dco_integer;
1723 
1724 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1725 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1726 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1727 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1728 		wrpll_params.central_freq;
1729 
1730 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1731 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1732 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1733 
1734 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1735 							&crtc_state->dpll_hw_state);
1736 
1737 	return 0;
1738 }
1739 
/*
 * Select the DP link rate field in ctrl1; rates not listed leave only
 * the override bit set.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1776 
/* Map the ctrl1 link rate field back to a port clock (link rate * 2). */
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
	case DPLL_CTRL1_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
1810 
1811 static int skl_compute_dpll(struct intel_atomic_state *state,
1812 			    struct intel_crtc *crtc,
1813 			    struct intel_encoder *encoder)
1814 {
1815 	struct intel_crtc_state *crtc_state =
1816 		intel_atomic_get_new_crtc_state(state, crtc);
1817 
1818 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1819 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1820 	else if (intel_crtc_has_dp_encoder(crtc_state))
1821 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1822 	else
1823 		return -EINVAL;
1824 }
1825 
1826 static int skl_get_dpll(struct intel_atomic_state *state,
1827 			struct intel_crtc *crtc,
1828 			struct intel_encoder *encoder)
1829 {
1830 	struct intel_crtc_state *crtc_state =
1831 		intel_atomic_get_new_crtc_state(state, crtc);
1832 	struct intel_shared_dpll *pll;
1833 
1834 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1835 		pll = intel_find_shared_dpll(state, crtc,
1836 					     &crtc_state->dpll_hw_state,
1837 					     BIT(DPLL_ID_SKL_DPLL0));
1838 	else
1839 		pll = intel_find_shared_dpll(state, crtc,
1840 					     &crtc_state->dpll_hw_state,
1841 					     BIT(DPLL_ID_SKL_DPLL3) |
1842 					     BIT(DPLL_ID_SKL_DPLL2) |
1843 					     BIT(DPLL_ID_SKL_DPLL1));
1844 	if (!pll)
1845 		return -EINVAL;
1846 
1847 	intel_reference_shared_dpll(state, crtc,
1848 				    pll, &crtc_state->dpll_hw_state);
1849 
1850 	crtc_state->shared_dpll = pll;
1851 
1852 	return 0;
1853 }
1854 
1855 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1856 				const struct intel_shared_dpll *pll,
1857 				const struct intel_dpll_hw_state *pll_state)
1858 {
1859 	/*
1860 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1861 	 * the internal shift for each field
1862 	 */
1863 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1864 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1865 	else
1866 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1867 }
1868 
/* The SKL DPLL reference is the CDCLK reference clock (non-SSC only). */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
1874 
/* Log the SKL PLL hardware state (ctrl1 + cfgcr1/2). */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1884 
/* Operations for the on-demand SKL DPLLs 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1891 
/* Operations for DPLL0, whose enable/disable is owned by CDCLK code. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1898 
/* SKL PLL pool: always-on DPLL0 plus three on-demand DPLLs. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1906 
/* Top-level DPLL manager for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1915 
/*
 * Enable a BXT/GLK port PLL: select the non-SSC reference, program the
 * divider/coefficient registers from pll->state.hw_state, trigger a
 * recalibration, enable the PLL and wait for lock, then set up lane
 * staggering. The register write order below is significant.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	/* GLK requires PLL power to be raised (and verified) first */
	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);

	/* Write M2 integer */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);

	/* Write N */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);

	/* Write M2 fraction */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);

	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2016 
/*
 * Disable a BXT/GLK port PLL; on GLK also drop PLL power and verify the
 * power state clears.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2035 
/*
 * Read back the current hardware state of a BXT/GLK port PLL.
 *
 * Fills @hw_state with the register values masked down to exactly the
 * bits programmed by the enable path, so that software/hardware state
 * comparison works. Returns true if the PLL is enabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Can't read the registers if the display core power is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	/* Each dword is masked to the bits the enable path programs. */
	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2115 
/* pre-calculated values for DP linkrates */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	/* .dot is the DP port_clock in kHz and serves as the lookup key */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2127 
/*
 * Compute the BXT port PLL dividers for an HDMI output.
 *
 * Returns 0 on success, -EINVAL if no suitable divider configuration
 * exists for the requested clock.
 */
static int
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, clk_div))
		return -EINVAL;

	/* The search is expected to yield m1 == 2; warn if it didn't. */
	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);

	return 0;
}
2146 
2147 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2148 				    struct dpll *clk_div)
2149 {
2150 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2151 	int i;
2152 
2153 	*clk_div = bxt_dp_clk_val[0];
2154 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2155 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2156 			*clk_div = bxt_dp_clk_val[i];
2157 			break;
2158 		}
2159 	}
2160 
2161 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2162 
2163 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2164 		    clk_div->dot != crtc_state->port_clock);
2165 }
2166 
/*
 * Translate computed clock dividers into the BXT port PLL register values
 * stored in crtc_state->dpll_hw_state.
 *
 * Returns 0 on success, -EINVAL if the VCO frequency falls outside every
 * supported band.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/* Loop filter coefficients are selected by VCO frequency band. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane staggering setting depends on the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* m2 is a .22 fixed point value: integer part in pll0, fraction in pll2. */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2234 
2235 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2236 				const struct intel_shared_dpll *pll,
2237 				const struct intel_dpll_hw_state *pll_state)
2238 {
2239 	struct dpll clock;
2240 
2241 	clock.m1 = 2;
2242 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2243 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2244 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2245 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2246 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2247 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2248 
2249 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2250 }
2251 
/* Fill crtc_state->dpll_hw_state for a DP output using the fixed table. */
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct dpll clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2261 
/* Fill crtc_state->dpll_hw_state for an HDMI output. */
static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct dpll clk_div = {};
	int ret;

	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);

	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
	if (ret)
		return ret;

	/*
	 * Read back the frequency the chosen dividers actually produce, so
	 * port_clock reflects the real PLL output rather than the request.
	 */
	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
						      &crtc_state->dpll_hw_state);

	return 0;
}
2280 
2281 static int bxt_compute_dpll(struct intel_atomic_state *state,
2282 			    struct intel_crtc *crtc,
2283 			    struct intel_encoder *encoder)
2284 {
2285 	struct intel_crtc_state *crtc_state =
2286 		intel_atomic_get_new_crtc_state(state, crtc);
2287 
2288 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2289 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2290 	else if (intel_crtc_has_dp_encoder(crtc_state))
2291 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2292 	else
2293 		return -EINVAL;
2294 }
2295 
/*
 * Assign the (fixed) shared DPLL for the CRTC. On BXT each DDI port has
 * a dedicated PLL, so there is no selection to make — just reference
 * the PLL matching the encoder's port.
 */
static int bxt_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
2320 
/* BXT/GLK use a fixed 100 MHz reference for both SSC and non-SSC clocks. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 100000;
	i915->display.dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2327 
2328 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2329 			      const struct intel_dpll_hw_state *hw_state)
2330 {
2331 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2332 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2333 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2334 		    hw_state->ebb0,
2335 		    hw_state->ebb4,
2336 		    hw_state->pll0,
2337 		    hw_state->pll1,
2338 		    hw_state->pll2,
2339 		    hw_state->pll3,
2340 		    hw_state->pll6,
2341 		    hw_state->pll8,
2342 		    hw_state->pll9,
2343 		    hw_state->pll10,
2344 		    hw_state->pcsdw12);
2345 }
2346 
/* Hooks used by the shared-DPLL framework for the BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2353 
/* One dedicated PLL per DDI port on BXT/GLK; no special flags. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ }, /* sentinel */
};
2360 
/* Platform DPLL manager for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2369 
/*
 * Decompose the overall WRPLL divider into the P, Q and K factors the
 * hardware expects. Only divider values from the table in icl_calc_wrpll()
 * are handled; other even values leave the outputs untouched.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3/5/7 map directly, 9/15/21 use K = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2408 
/*
 * Encode logical divider values and the DCO frequency into the register
 * field encodings stored in @params.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* K divider register encoding: 1 -> 1, 2 -> 2, 3 -> 4 */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* P divider register encoding: 2 -> 1, 3 -> 2, 5 -> 4, 7 -> 8 */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Q dividers other than 1 are only valid together with K = 2. */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio as a 15-bit binary fixed point value. */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2456 
2457 /*
2458  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2459  * Program half of the nominal DCO divider fraction value.
2460  */
2461 static bool
2462 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2463 {
2464 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2465 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2466 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2467 		 i915->display.dpll.ref_clks.nssc == 38400;
2468 }
2469 
/* Table entry pairing a DP link rate with its pre-computed PLL parameters. */
struct icl_combo_pll_params {
	int clock;			/* port clock in kHz, the lookup key */
	struct skl_wrpll_params wrpll;	/* register-ready PLL parameters */
};
2474 
2475 /*
 * These values are already adjusted: they're the bits we write to the
2477  * registers, not the logical values.
2478  */
/* Combo PHY DP PLL parameters for a 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2505 
2506 
/* Also used for 38.4 MHz values. */
/* Combo PHY DP PLL parameters for a 19.2 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2534 
/* Thunderbolt PLL parameters, fixed per reference clock (ICL and TGL+). */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2555 
2556 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2557 				 struct skl_wrpll_params *pll_params)
2558 {
2559 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2560 	const struct icl_combo_pll_params *params =
2561 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2562 		icl_dp_combo_pll_24MHz_values :
2563 		icl_dp_combo_pll_19_2MHz_values;
2564 	int clock = crtc_state->port_clock;
2565 	int i;
2566 
2567 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2568 		if (clock == params[i].clock) {
2569 			*pll_params = params[i].wrpll;
2570 			return 0;
2571 		}
2572 	}
2573 
2574 	MISSING_CASE(clock);
2575 	return -EINVAL;
2576 }
2577 
2578 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2579 			    struct skl_wrpll_params *pll_params)
2580 {
2581 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2582 
2583 	if (DISPLAY_VER(dev_priv) >= 12) {
2584 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2585 		default:
2586 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2587 			fallthrough;
2588 		case 19200:
2589 		case 38400:
2590 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2591 			break;
2592 		case 24000:
2593 			*pll_params = tgl_tbt_pll_24MHz_values;
2594 			break;
2595 		}
2596 	} else {
2597 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2598 		default:
2599 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2600 			fallthrough;
2601 		case 19200:
2602 		case 38400:
2603 			*pll_params = icl_tbt_pll_19_2MHz_values;
2604 			break;
2605 		case 24000:
2606 			*pll_params = icl_tbt_pll_24MHz_values;
2607 			break;
2608 		}
2609 	}
2610 
2611 	return 0;
2612 }
2613 
/*
 * The TBT PLL has no single meaningful output frequency from the DPLL
 * state alone, so reaching this hook indicates a driver bug.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2626 
2627 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2628 {
2629 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2630 
2631 	/*
2632 	 * For ICL+, the spec states: if reference frequency is 38.4,
2633 	 * use 19.2 because the DPLL automatically divides that by 2.
2634 	 */
2635 	if (ref_clock == 38400)
2636 		ref_clock = 19200;
2637 
2638 	return ref_clock;
2639 }
2640 
2641 static int
2642 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2643 	       struct skl_wrpll_params *wrpll_params)
2644 {
2645 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2646 	int ref_clock = icl_wrpll_ref_clock(i915);
2647 	u32 afe_clock = crtc_state->port_clock * 5;
2648 	u32 dco_min = 7998000;
2649 	u32 dco_max = 10000000;
2650 	u32 dco_mid = (dco_min + dco_max) / 2;
2651 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2652 					 18, 20, 24, 28, 30, 32,  36,  40,
2653 					 42, 44, 48, 50, 52, 54,  56,  60,
2654 					 64, 66, 68, 70, 72, 76,  78,  80,
2655 					 84, 88, 90, 92, 96, 98, 100, 102,
2656 					  3,  5,  7,  9, 15, 21 };
2657 	u32 dco, best_dco = 0, dco_centrality = 0;
2658 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2659 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2660 
2661 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2662 		dco = afe_clock * dividers[d];
2663 
2664 		if (dco <= dco_max && dco >= dco_min) {
2665 			dco_centrality = abs(dco - dco_mid);
2666 
2667 			if (dco_centrality < best_dco_centrality) {
2668 				best_dco_centrality = dco_centrality;
2669 				best_div = dividers[d];
2670 				best_dco = dco;
2671 			}
2672 		}
2673 	}
2674 
2675 	if (best_div == 0)
2676 		return -EINVAL;
2677 
2678 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2679 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2680 				  pdiv, qdiv, kdiv);
2681 
2682 	return 0;
2683 }
2684 
/*
 * Decode the combo PHY PLL registers back into an output frequency (kHz).
 *
 * Returns 0 (with a WARN) if any decoded divider is zero.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* The Q divider only takes effect when qdiv_mode is set. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Map the P divider register encoding back to its logical value. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* Map the K divider register encoding back to its logical value. */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the WA #22010492432 halving done when programming. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* Fraction is a 15-bit binary fixed point value. */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2745 
/*
 * Convert logical WRPLL parameters into the CFGCR0/CFGCR1 (and DIV0)
 * register values stored in @pll_state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/* WA #22010492432: program half the nominal DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* Optional VBT override of the AFC startup value. */
	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
2771 
/*
 * Search for MG/DKL PHY PLL dividers (div1 x div2) that put the DCO
 * frequency within the allowed range for the given port clock.
 *
 * On success fills @target_dco_khz and the clktop fields of @state and
 * returns 0; returns -EINVAL if no combination fits. The iteration
 * order (div1 descending via the table, div2 from 10 down) determines
 * which valid combination wins, so it must not be changed.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP pins the DCO to exactly 8.1 GHz; HDMI allows a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Encode div1 into the HSDIV register field. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2847 
2848 /*
2849  * The specification for this function uses real numbers, so the math had to be
2850  * adapted to integer-only calculation, that's why it looks so different.
2851  */
2852 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2853 				 struct intel_dpll_hw_state *pll_state)
2854 {
2855 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2856 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2857 	int clock = crtc_state->port_clock;
2858 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2859 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2860 	u32 prop_coeff, int_coeff;
2861 	u32 tdc_targetcnt, feedfwgain;
2862 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2863 	u64 tmp;
2864 	bool use_ssc = false;
2865 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2866 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2867 	int ret;
2868 
2869 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2870 				       pll_state, is_dkl);
2871 	if (ret)
2872 		return ret;
2873 
2874 	m1div = 2;
2875 	m2div_int = dco_khz / (refclk_khz * m1div);
2876 	if (m2div_int > 255) {
2877 		if (!is_dkl) {
2878 			m1div = 4;
2879 			m2div_int = dco_khz / (refclk_khz * m1div);
2880 		}
2881 
2882 		if (m2div_int > 255)
2883 			return -EINVAL;
2884 	}
2885 	m2div_rem = dco_khz % (refclk_khz * m1div);
2886 
2887 	tmp = (u64)m2div_rem * (1 << 22);
2888 	do_div(tmp, refclk_khz * m1div);
2889 	m2div_frac = tmp;
2890 
2891 	switch (refclk_khz) {
2892 	case 19200:
2893 		iref_ndiv = 1;
2894 		iref_trim = 28;
2895 		iref_pulse_w = 1;
2896 		break;
2897 	case 24000:
2898 		iref_ndiv = 1;
2899 		iref_trim = 25;
2900 		iref_pulse_w = 2;
2901 		break;
2902 	case 38400:
2903 		iref_ndiv = 2;
2904 		iref_trim = 28;
2905 		iref_pulse_w = 1;
2906 		break;
2907 	default:
2908 		MISSING_CASE(refclk_khz);
2909 		return -EINVAL;
2910 	}
2911 
2912 	/*
2913 	 * tdc_res = 0.000003
2914 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2915 	 *
2916 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2917 	 * was supposed to be a division, but we rearranged the operations of
2918 	 * the formula to avoid early divisions so we don't multiply the
2919 	 * rounding errors.
2920 	 *
2921 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2922 	 * we also rearrange to work with integers.
2923 	 *
2924 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2925 	 * last division by 10.
2926 	 */
2927 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2928 
2929 	/*
2930 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2931 	 * 32 bits. That's not a problem since we round the division down
2932 	 * anyway.
2933 	 */
2934 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2935 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2936 
2937 	if (dco_khz >= 9000000) {
2938 		prop_coeff = 5;
2939 		int_coeff = 10;
2940 	} else {
2941 		prop_coeff = 4;
2942 		int_coeff = 8;
2943 	}
2944 
2945 	if (use_ssc) {
2946 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2947 		do_div(tmp, refclk_khz * m1div * 10000);
2948 		ssc_stepsize = tmp;
2949 
2950 		tmp = mul_u32_u32(dco_khz, 1000);
2951 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2952 	} else {
2953 		ssc_stepsize = 0;
2954 		ssc_steplen = 0;
2955 	}
2956 	ssc_steplog = 4;
2957 
2958 	/* write pll_state calculations */
2959 	if (is_dkl) {
2960 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2961 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2962 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2963 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2964 		if (dev_priv->display.vbt.override_afc_startup) {
2965 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2966 
2967 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2968 		}
2969 
2970 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2971 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2972 
2973 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2974 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2975 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2976 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2977 
2978 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2979 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2980 
2981 		pll_state->mg_pll_tdc_coldst_bias =
2982 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2983 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2984 
2985 	} else {
2986 		pll_state->mg_pll_div0 =
2987 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2988 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2989 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2990 
2991 		pll_state->mg_pll_div1 =
2992 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2993 			MG_PLL_DIV1_DITHER_DIV_2 |
2994 			MG_PLL_DIV1_NDIVRATIO(1) |
2995 			MG_PLL_DIV1_FBPREDIV(m1div);
2996 
2997 		pll_state->mg_pll_lf =
2998 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2999 			MG_PLL_LF_AFCCNTSEL_512 |
3000 			MG_PLL_LF_GAINCTRL(1) |
3001 			MG_PLL_LF_INT_COEFF(int_coeff) |
3002 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3003 
3004 		pll_state->mg_pll_frac_lock =
3005 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3006 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3007 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3008 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3009 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3010 		if (use_ssc || m2div_rem > 0)
3011 			pll_state->mg_pll_frac_lock |=
3012 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3013 
3014 		pll_state->mg_pll_ssc =
3015 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3016 			MG_PLL_SSC_TYPE(2) |
3017 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3018 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3019 			MG_PLL_SSC_FLLEN |
3020 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3021 
3022 		pll_state->mg_pll_tdc_coldst_bias =
3023 			MG_PLL_TDC_COLDST_COLDSTART |
3024 			MG_PLL_TDC_COLDST_IREFINT_EN |
3025 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3026 			MG_PLL_TDC_TDCOVCCORR_EN |
3027 			MG_PLL_TDC_TDCSEL(3);
3028 
3029 		pll_state->mg_pll_bias =
3030 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3031 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3032 			MG_PLL_BIAS_BIAS_BONUS(10) |
3033 			MG_PLL_BIAS_BIASCAL_EN |
3034 			MG_PLL_BIAS_CTRIM(12) |
3035 			MG_PLL_BIAS_VREF_RDAC(4) |
3036 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3037 
3038 		if (refclk_khz == 38400) {
3039 			pll_state->mg_pll_tdc_coldst_bias_mask =
3040 				MG_PLL_TDC_COLDST_COLDSTART;
3041 			pll_state->mg_pll_bias_mask = 0;
3042 		} else {
3043 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3044 			pll_state->mg_pll_bias_mask = -1U;
3045 		}
3046 
3047 		pll_state->mg_pll_tdc_coldst_bias &=
3048 			pll_state->mg_pll_tdc_coldst_bias_mask;
3049 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3050 	}
3051 
3052 	return 0;
3053 }
3054 
/*
 * Compute the port clock (in kHz) from the MG (ICL) / Dekel (TGL+) PHY PLL
 * register state, inverting the divider setup done when the state was
 * calculated.
 *
 * Returns the clock in kHz, or 0 for an unexpected HSDIV ratio field.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	/* These PLLs run off the non-SSC reference clock. */
	ref_clock = dev_priv->display.dpll.ref_clks.nssc;

	/*
	 * The feedback divider fields live in different registers on the
	 * Dekel (display ver >= 12) vs. the MG PHY.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* Fractional feedback divider only counts when enabled. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the high-speed divider ratio from CLKTOP2_HSCLKCTL. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3126 
3127 /**
3128  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3129  * @crtc_state: state for the CRTC to select the DPLL for
3130  * @port_dpll_id: the active @port_dpll_id to select
3131  *
3132  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3133  * CRTC.
3134  */
3135 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3136 			      enum icl_port_dpll_id port_dpll_id)
3137 {
3138 	struct icl_port_dpll *port_dpll =
3139 		&crtc_state->icl_port_dplls[port_dpll_id];
3140 
3141 	crtc_state->shared_dpll = port_dpll->pll;
3142 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3143 }
3144 
3145 static void icl_update_active_dpll(struct intel_atomic_state *state,
3146 				   struct intel_crtc *crtc,
3147 				   struct intel_encoder *encoder)
3148 {
3149 	struct intel_crtc_state *crtc_state =
3150 		intel_atomic_get_new_crtc_state(state, crtc);
3151 	struct intel_digital_port *primary_port;
3152 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3153 
3154 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3155 		enc_to_mst(encoder)->primary :
3156 		enc_to_dig_port(encoder);
3157 
3158 	if (primary_port &&
3159 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3160 	     intel_tc_port_in_legacy_mode(primary_port)))
3161 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3162 
3163 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3164 }
3165 
3166 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3167 				      struct intel_crtc *crtc)
3168 {
3169 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3170 	struct intel_crtc_state *crtc_state =
3171 		intel_atomic_get_new_crtc_state(state, crtc);
3172 	struct icl_port_dpll *port_dpll =
3173 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3174 	struct skl_wrpll_params pll_params = {};
3175 	int ret;
3176 
3177 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3178 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3179 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3180 	else
3181 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3182 
3183 	if (ret)
3184 		return ret;
3185 
3186 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3187 
3188 	/* this is mainly for the fastset check */
3189 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3190 
3191 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3192 							    &port_dpll->hw_state);
3193 
3194 	return 0;
3195 }
3196 
/*
 * Reserve a combo PHY PLL for the CRTC from the platform specific pool of
 * candidate DPLLs and select it as the CRTC's active port DPLL.
 *
 * Returns 0 on success, -EINVAL if no suitable DPLL is available.
 */
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum port port = encoder->port;
	unsigned long dpll_mask;

	/* Build the per-platform (and partly per-port) candidate PLL mask. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* DG1 splits its PLL pool between port pairs. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_hti_dpll_mask(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;
}
3255 
3256 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3257 				    struct intel_crtc *crtc)
3258 {
3259 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3260 	struct intel_crtc_state *crtc_state =
3261 		intel_atomic_get_new_crtc_state(state, crtc);
3262 	struct icl_port_dpll *port_dpll =
3263 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3264 	struct skl_wrpll_params pll_params = {};
3265 	int ret;
3266 
3267 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3268 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3269 	if (ret)
3270 		return ret;
3271 
3272 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3273 
3274 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3275 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3276 	if (ret)
3277 		return ret;
3278 
3279 	/* this is mainly for the fastset check */
3280 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3281 
3282 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3283 							 &port_dpll->hw_state);
3284 
3285 	return 0;
3286 }
3287 
3288 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3289 				struct intel_crtc *crtc,
3290 				struct intel_encoder *encoder)
3291 {
3292 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3293 	struct intel_crtc_state *crtc_state =
3294 		intel_atomic_get_new_crtc_state(state, crtc);
3295 	struct icl_port_dpll *port_dpll =
3296 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3297 	enum intel_dpll_id dpll_id;
3298 	int ret;
3299 
3300 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3301 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3302 						&port_dpll->hw_state,
3303 						BIT(DPLL_ID_ICL_TBTPLL));
3304 	if (!port_dpll->pll)
3305 		return -EINVAL;
3306 	intel_reference_shared_dpll(state, crtc,
3307 				    port_dpll->pll, &port_dpll->hw_state);
3308 
3309 
3310 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3311 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3312 							 encoder->port));
3313 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3314 						&port_dpll->hw_state,
3315 						BIT(dpll_id));
3316 	if (!port_dpll->pll) {
3317 		ret = -EINVAL;
3318 		goto err_unreference_tbt_pll;
3319 	}
3320 	intel_reference_shared_dpll(state, crtc,
3321 				    port_dpll->pll, &port_dpll->hw_state);
3322 
3323 	icl_update_active_dpll(state, crtc, encoder);
3324 
3325 	return 0;
3326 
3327 err_unreference_tbt_pll:
3328 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3329 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3330 
3331 	return ret;
3332 }
3333 
3334 static int icl_compute_dplls(struct intel_atomic_state *state,
3335 			     struct intel_crtc *crtc,
3336 			     struct intel_encoder *encoder)
3337 {
3338 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3339 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3340 
3341 	if (intel_phy_is_combo(dev_priv, phy))
3342 		return icl_compute_combo_phy_dpll(state, crtc);
3343 	else if (intel_phy_is_tc(dev_priv, phy))
3344 		return icl_compute_tc_phy_dplls(state, crtc);
3345 
3346 	MISSING_CASE(phy);
3347 
3348 	return 0;
3349 }
3350 
3351 static int icl_get_dplls(struct intel_atomic_state *state,
3352 			 struct intel_crtc *crtc,
3353 			 struct intel_encoder *encoder)
3354 {
3355 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3356 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3357 
3358 	if (intel_phy_is_combo(dev_priv, phy))
3359 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3360 	else if (intel_phy_is_tc(dev_priv, phy))
3361 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3362 
3363 	MISSING_CASE(phy);
3364 
3365 	return -EINVAL;
3366 }
3367 
3368 static void icl_put_dplls(struct intel_atomic_state *state,
3369 			  struct intel_crtc *crtc)
3370 {
3371 	const struct intel_crtc_state *old_crtc_state =
3372 		intel_atomic_get_old_crtc_state(state, crtc);
3373 	struct intel_crtc_state *new_crtc_state =
3374 		intel_atomic_get_new_crtc_state(state, crtc);
3375 	enum icl_port_dpll_id id;
3376 
3377 	new_crtc_state->shared_dpll = NULL;
3378 
3379 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3380 		const struct icl_port_dpll *old_port_dpll =
3381 			&old_crtc_state->icl_port_dplls[id];
3382 		struct icl_port_dpll *new_port_dpll =
3383 			&new_crtc_state->icl_port_dplls[id];
3384 
3385 		new_port_dpll->pll = NULL;
3386 
3387 		if (!old_port_dpll->pll)
3388 			continue;
3389 
3390 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3391 	}
3392 }
3393 
/*
 * Read back the MG PHY PLL state from hardware, masking each register down
 * to the fields the driver programs so the result can be compared against
 * the software state.
 *
 * Returns true if the PLL is enabled and the state was read.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * With a 38.4 MHz refclk only part of these two registers is
	 * software controlled; limit the masks accordingly so readout
	 * matches what gets programmed.
	 */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3459 
/*
 * Read back the Dekel (TGL+) PHY PLL state from hardware, masking each
 * register down to the driver-programmed fields for state comparison.
 *
 * Returns true if the PLL is enabled and the state was read.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
	/* The AFC startup field is only owned by us with a VBT override. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3530 
/*
 * Read back a combo/TBT PLL's CFGCR (and, where applicable, DIV0) state,
 * choosing the platform-specific register instances.
 *
 * Returns true if the PLL is enabled and the state was read.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only software-owned when VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* JSL/EHL DPLL4 uses the CFGCR register instance at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3589 
3590 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3591 				   struct intel_shared_dpll *pll,
3592 				   struct intel_dpll_hw_state *hw_state)
3593 {
3594 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3595 
3596 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3597 }
3598 
3599 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3600 				 struct intel_shared_dpll *pll,
3601 				 struct intel_dpll_hw_state *hw_state)
3602 {
3603 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3604 }
3605 
/*
 * Program the combo/TBT PLL CFGCR registers (and the AFC startup override in
 * DIV0 where supported) from the software state, finishing with a posting
 * read to flush the writes.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	/* Pick the platform-specific CFGCR register instances. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* JSL/EHL DPLL4 uses the CFGCR register instance at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC override without a DIV0 register would be a platform bug. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3646 
/*
 * Program the MG PHY PLL registers from the software state, using RMW for
 * registers with reserved or conditionally HW-owned fields.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	/* These registers are fully software owned and written outright. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	/* Flush all the writes above before the PLL gets enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3689 
/*
 * Program the Dekel (TGL+) PHY PLL registers from the software state. All
 * registers are updated read-modify-write, clearing only the fields the
 * driver owns.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* Only touch the AFC startup field when VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush all the writes above before the PLL gets enabled. */
	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3754 
/* Power up a PLL and wait for its power state bit to assert. */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3769 
/* Enable a PLL and wait for it to report lock. */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
3780 
/*
 * Apply the ADL-P A0 CMTG clock gating workaround. Only relevant for DPLL0
 * on affected steppings; a no-op otherwise.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	/* The RMW returns the pre-write value; used for the sanity check. */
	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3804 
/*
 * Full enable sequence for a combo PHY PLL: power up, program the dividers,
 * enable and wait for lock. On JSL/EHL DPLL4 additionally blocks DC states
 * for the PLL's lifetime.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3838 
/* Full enable sequence for the TBT PLL: power up, program, enable. */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3856 
/*
 * Full enable sequence for a Type-C PHY PLL: power up, program the MG (ICL)
 * or Dekel (TGL+) PHY registers, enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3879 
/*
 * Common ICL+ PLL disable sequence: clear the enable bit and wait for
 * the PLL to report unlocked, then remove power and wait for the power
 * state to clear. @enable_reg holds all three control/status bits.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3910 
/*
 * Disable a combo PHY PLL. On JSL/EHL, DPLL4 enable took a DC_OFF
 * power reference (to keep DC states disabled); drop it here after
 * the PLL has been disabled.
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	/* Release the DC_OFF reference taken in combo_pll_enable(). */
	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
					pll->wakeref);
}
3923 
/* Disable the Thunderbolt PLL via the common ICL+ disable sequence. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3929 
/* Disable a Type-C PHY PLL via the common ICL+ disable sequence. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3937 
/* On ICL+ the non-SSC DPLL reference clock tracks the CDCLK reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3943 
3944 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3945 			      const struct intel_dpll_hw_state *hw_state)
3946 {
3947 	drm_dbg_kms(&dev_priv->drm,
3948 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3949 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3950 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3951 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3952 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3953 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3954 		    hw_state->cfgcr0, hw_state->cfgcr1,
3955 		    hw_state->div0,
3956 		    hw_state->mg_refclkin_ctl,
3957 		    hw_state->mg_clktop2_coreclkctl1,
3958 		    hw_state->mg_clktop2_hsclkctl,
3959 		    hw_state->mg_pll_div0,
3960 		    hw_state->mg_pll_div1,
3961 		    hw_state->mg_pll_lf,
3962 		    hw_state->mg_pll_frac_lock,
3963 		    hw_state->mg_pll_ssc,
3964 		    hw_state->mg_pll_bias,
3965 		    hw_state->mg_pll_tdc_coldst_bias);
3966 }
3967 
/* Hooks for the ICL+ combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
3974 
/* Hooks for the Type-C Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
3981 
/* Hooks for the ICL MG (Type-C) PHY PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3988 
/* ICL: two combo PLLs, one TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
3999 
/* ICL DPLL manager. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4009 
/* JSL/EHL: combo PLLs only; no Type-C or TBT PLLs. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4016 
/* JSL/EHL DPLL manager; no update_active_dpll hook needed. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4025 
/*
 * Hooks for the Dekel PHY (Type-C) PLLs: same enable/disable paths as
 * the MG PLLs, but a Dekel-specific hardware state readout.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4032 
/* TGL: two combo PLLs, one TBT PLL and six Dekel (Type-C) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
4045 
/* TGL DPLL manager. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4055 
/* RKL: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4062 
/* RKL DPLL manager; no update_active_dpll hook needed. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4071 
/* DG1: four combo PLLs with DG1-specific ids. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};
4079 
/* DG1 DPLL manager. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4088 
/* ADL-S: four combo PLLs, mixing ICL and DG1 ids. */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};
4096 
/* ADL-S DPLL manager. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4105 
/* ADL-P: two combo PLLs, one TBT PLL and four Dekel (Type-C) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
4116 
/* ADL-P DPLL manager. */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4126 
4127 /**
4128  * intel_shared_dpll_init - Initialize shared DPLLs
4129  * @dev_priv: i915 device
4130  *
4131  * Initialize shared DPLLs for @dev_priv.
4132  */
void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
{
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	mutex_init(&dev_priv->display.dpll.lock);

	/*
	 * Pick the DPLL manager matching the platform. More specific
	 * platform checks must come before the broader DISPLAY_VER()
	 * fallbacks, so the chain order below is significant.
	 */
	if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (IS_ALDERLAKE_P(dev_priv))
		dpll_mgr = &adlp_pll_mgr;
	else if (IS_ALDERLAKE_S(dev_priv))
		dpll_mgr = &adls_pll_mgr;
	else if (IS_DG1(dev_priv))
		dpll_mgr = &dg1_pll_mgr;
	else if (IS_ROCKETLAKE(dev_priv))
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_JSL_EHL(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(dev_priv) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	/* Platform without shared DPLL support. */
	if (!dpll_mgr) {
		dev_priv->display.dpll.num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	/* Wire up each table entry; the table is terminated by a NULL name. */
	for (i = 0; dpll_info[i].name; i++) {
		/* Guard against overflowing the fixed-size shared_dplls array. */
		if (drm_WARN_ON(&dev_priv->drm,
				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
			break;

		/* The tables are expected to be indexed by DPLL id. */
		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->display.dpll.mgr = dpll_mgr;
	dev_priv->display.dpll.num_shared_dpll = i;
}
4186 
4187 /**
 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4189  * @state: atomic state
4190  * @crtc: CRTC to compute DPLLs for
4191  * @encoder: encoder
4192  *
4193  * This function computes the DPLL state for the given CRTC and encoder.
4194  *
4195  * The new configuration in the atomic commit @state is made effective by
4196  * calling intel_shared_dpll_swap_state().
4197  *
4198  * Returns:
 * 0 on success, negative error code on failure.
4200  */
4201 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4202 			       struct intel_crtc *crtc,
4203 			       struct intel_encoder *encoder)
4204 {
4205 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4206 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4207 
4208 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4209 		return -EINVAL;
4210 
4211 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4212 }
4213 
4214 /**
4215  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4216  * @state: atomic state
4217  * @crtc: CRTC to reserve DPLLs for
4218  * @encoder: encoder
4219  *
4220  * This function reserves all required DPLLs for the given CRTC and encoder
4221  * combination in the current atomic commit @state and the new @crtc atomic
4222  * state.
4223  *
4224  * The new configuration in the atomic commit @state is made effective by
4225  * calling intel_shared_dpll_swap_state().
4226  *
4227  * The reserved DPLLs should be released by calling
4228  * intel_release_shared_dplls().
4229  *
4230  * Returns:
4231  * 0 if all required DPLLs were successfully reserved,
4232  * negative error code otherwise.
4233  */
4234 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4235 			       struct intel_crtc *crtc,
4236 			       struct intel_encoder *encoder)
4237 {
4238 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4239 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4240 
4241 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4242 		return -EINVAL;
4243 
4244 	return dpll_mgr->get_dplls(state, crtc, encoder);
4245 }
4246 
4247 /**
4248  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4249  * @state: atomic state
4250  * @crtc: crtc from which the DPLLs are to be released
4251  *
4252  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4253  * from the current atomic commit @state and the old @crtc atomic state.
4254  *
4255  * The new configuration in the atomic commit @state is made effective by
4256  * calling intel_shared_dpll_swap_state().
4257  */
4258 void intel_release_shared_dplls(struct intel_atomic_state *state,
4259 				struct intel_crtc *crtc)
4260 {
4261 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4262 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4263 
4264 	/*
4265 	 * FIXME: this function is called for every platform having a
4266 	 * compute_clock hook, even though the platform doesn't yet support
4267 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4268 	 * called on those.
4269 	 */
4270 	if (!dpll_mgr)
4271 		return;
4272 
4273 	dpll_mgr->put_dplls(state, crtc);
4274 }
4275 
4276 /**
4277  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4278  * @state: atomic state
4279  * @crtc: the CRTC for which to update the active DPLL
4280  * @encoder: encoder determining the type of port DPLL
4281  *
4282  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4283  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4284  * DPLL selected will be based on the current mode of the encoder's port.
4285  */
4286 void intel_update_active_dpll(struct intel_atomic_state *state,
4287 			      struct intel_crtc *crtc,
4288 			      struct intel_encoder *encoder)
4289 {
4290 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4291 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4292 
4293 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4294 		return;
4295 
4296 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4297 }
4298 
4299 /**
4300  * intel_dpll_get_freq - calculate the DPLL's output frequency
4301  * @i915: i915 device
4302  * @pll: DPLL for which to calculate the output frequency
4303  * @pll_state: DPLL state from which to calculate the output frequency
4304  *
4305  * Return the output frequency corresponding to @pll's passed in @pll_state.
4306  */
4307 int intel_dpll_get_freq(struct drm_i915_private *i915,
4308 			const struct intel_shared_dpll *pll,
4309 			const struct intel_dpll_hw_state *pll_state)
4310 {
4311 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4312 		return 0;
4313 
4314 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4315 }
4316 
4317 /**
4318  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4319  * @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
4321  * @hw_state: DPLL's hardware state
4322  *
4323  * Read out @pll's hardware state into @hw_state.
4324  */
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state)
{
	/* Delegate to the PLL-type specific readout hook. */
	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
}
4331 
/*
 * Read out the hardware state of @pll and rebuild the software
 * tracking (pipe_mask/active_mask) from the currently active CRTCs.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * Mirror the DC_OFF reference taken on the enable path for EHL
	 * DPLL4, so a later disable can drop it symmetrically.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Re-derive the pipe references from the active CRTC states. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4359 
/* Refresh the DPLL reference clocks, if the platform provides a hook. */
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
}
4365 
4366 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4367 {
4368 	int i;
4369 
4370 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4371 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4372 }
4373 
/*
 * Sanitize the state of an enabled PLL: apply the ADL-P CMTG clock
 * gating workaround, and disable the PLL if no pipe is using it.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Apply the workaround even when the PLL stays enabled. */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4392 
/* Sanitize the state of all shared DPLLs after hardware readout. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
4400 
4401 /**
4402  * intel_dpll_dump_hw_state - write hw_state to dmesg
4403  * @dev_priv: i915 drm device
4404  * @hw_state: hw state to be written to the log
4405  *
4406  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4407  */
4408 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4409 			      const struct intel_dpll_hw_state *hw_state)
4410 {
4411 	if (dev_priv->display.dpll.mgr) {
4412 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4413 	} else {
4414 		/* fallback for platforms that don't use the shared dpll
4415 		 * infrastructure
4416 		 */
4417 		drm_dbg_kms(&dev_priv->drm,
4418 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4419 			    "fp0: 0x%x, fp1: 0x%x\n",
4420 			    hw_state->dpll,
4421 			    hw_state->dpll_md,
4422 			    hw_state->fp0,
4423 			    hw_state->fp1);
4424 	}
4425 }
4426 
/*
 * Cross-check the software tracking of @pll (on, active_mask,
 * pipe_mask, cached hw state) against the hardware. With a NULL @crtc
 * only the global PLL bookkeeping is checked; otherwise the checks
 * also cover @crtc's membership in the PLL's masks.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active consistency checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(dev_priv, pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	/* Without a crtc, only check the global user/reference accounting. */
	if (!crtc) {
		I915_STATE_WARN(dev_priv,
				pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active pipe must be in active_mask, an inactive one must not. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* The cached hw state must still match what the hardware reports. */
	I915_STATE_WARN(dev_priv,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4482 
4483 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4484 				    struct intel_crtc_state *old_crtc_state,
4485 				    struct intel_crtc_state *new_crtc_state)
4486 {
4487 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4488 
4489 	if (new_crtc_state->shared_dpll)
4490 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4491 					 crtc, new_crtc_state);
4492 
4493 	if (old_crtc_state->shared_dpll &&
4494 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4495 		u8 pipe_mask = BIT(crtc->pipe);
4496 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4497 
4498 		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
4499 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4500 				pipe_name(crtc->pipe), pll->active_mask);
4501 		I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask,
4502 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4503 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4504 	}
4505 }
4506 
/*
 * Verify the global bookkeeping of every shared DPLL (no crtc-specific
 * checks; see verify_single_dpll_state() with a NULL crtc).
 */
void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
					 NULL, NULL);
}
4515