1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "i915_reg.h"
27 #include "intel_de.h"
28 #include "intel_display_types.h"
29 #include "intel_dkl_phy.h"
30 #include "intel_dkl_phy_regs.h"
31 #include "intel_dpio_phy.h"
32 #include "intel_dpll.h"
33 #include "intel_dpll_mgr.h"
34 #include "intel_hti.h"
35 #include "intel_mg_phy_regs.h"
36 #include "intel_pch_refclk.h"
37 #include "intel_tc.h"
38 
39 /**
40  * DOC: Display PLLs
41  *
42  * Display PLLs used for driving outputs vary by platform. While some have
43  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
44  * from a pool. In the latter scenario, it is possible that multiple pipes
45  * share a PLL if their configurations match.
46  *
47  * This file provides an abstraction over display PLLs. The function
48  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
49  * users of a PLL are tracked and that tracking is integrated with the atomic
50  * modset interface. During an atomic operation, required PLLs can be reserved
51  * for a given CRTC and encoder configuration by calling
52  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
53  * with intel_release_shared_dplls().
54  * Changes to the users are first staged in the atomic state, and then made
55  * effective by calling intel_shared_dpll_swap_state() during the atomic
56  * commit phase.
57  */
58 
59 /* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true if the pll is
	 * enabled in hardware.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. Returns the frequency, or 0 on an unrecognized state.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
93 
/* Per-platform DPLL management vtable plus the platform's PLL table. */
struct intel_dpll_mgr {
	/* table of available PLLs, terminated by a zeroed sentinel entry */
	const struct dpll_info *dpll_info;

	/* compute the DPLL state for the given CRTC/encoder combination */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the DPLL(s) needed by the CRTC in the atomic state */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the DPLL(s) previously reserved for the CRTC */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: update which reserved DPLL is the active one */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: refresh the cached reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log the given hw state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
112 
113 static void
114 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
115 				  struct intel_shared_dpll_state *shared_dpll)
116 {
117 	enum intel_dpll_id i;
118 
119 	/* Copy shared dpll state */
120 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
121 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
122 
123 		shared_dpll[i] = pll->state;
124 	}
125 }
126 
/*
 * Return the atomic state's DPLL state array, lazily populating it from the
 * current software state on first access. Callers must hold the global
 * connection_mutex modeset lock (asserted below).
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		/* First access: mark set, then copy the live PLL state in. */
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
143 
144 /**
145  * intel_get_shared_dpll_by_id - get a DPLL given its id
146  * @dev_priv: i915 device instance
147  * @id: pll id
148  *
149  * Returns:
150  * A pointer to the DPLL with @id
151  */
152 struct intel_shared_dpll *
153 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
154 			    enum intel_dpll_id id)
155 {
156 	return &dev_priv->display.dpll.shared_dplls[id];
157 }
158 
/* For ILK+ */
/*
 * Assert that @pll's hardware enable state matches the expected @state,
 * emitting an I915_STATE_WARN on mismatch. Warns (and bails) if called
 * with a NULL pll.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Read the live enable bit from hardware, not the cached state. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(dev_priv, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
177 
178 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
179 {
180 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
181 }
182 
183 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
184 {
185 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
186 }
187 
/*
 * Return the enable register for a combo PHY PLL. DG1 uses dedicated
 * registers, and on JSL/EHL the DPLL4 enable lives in the MG PLL 0
 * register; everything else uses the common ICL enable register.
 */
static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
	if (IS_DG1(i915))
		return DG1_DPLL_ENABLE(pll->info->id);
	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		 (pll->info->id == DPLL_ID_EHL_DPLL4))
		return MG_PLL_ENABLE(0);

	return ICL_DPLL_ENABLE(pll->info->id);
}
200 
/*
 * Return the enable register for a Type-C PLL, keyed on the TC port the
 * PLL id maps to. ADL-P has its own per-port register; other platforms
 * use the MG PLL enable register.
 */
static i915_reg_t
intel_tc_pll_enable_reg(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);

	if (IS_ALDERLAKE_P(i915))
		return ADLP_PORTTC_PLL_ENABLE(tc_port);

	return MG_PLL_ENABLE(tc_port);
}
213 
214 /**
215  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
216  * @crtc_state: CRTC, and its state, which has a shared DPLL
217  *
218  * Enable the shared DPLL used by @crtc.
219  */
220 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
221 {
222 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
223 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
224 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
225 	unsigned int pipe_mask = BIT(crtc->pipe);
226 	unsigned int old_mask;
227 
228 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
229 		return;
230 
231 	mutex_lock(&dev_priv->display.dpll.lock);
232 	old_mask = pll->active_mask;
233 
234 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
235 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
236 		goto out;
237 
238 	pll->active_mask |= pipe_mask;
239 
240 	drm_dbg_kms(&dev_priv->drm,
241 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
242 		    pll->info->name, pll->active_mask, pll->on,
243 		    crtc->base.base.id, crtc->base.name);
244 
245 	if (old_mask) {
246 		drm_WARN_ON(&dev_priv->drm, !pll->on);
247 		assert_shared_dpll_enabled(dev_priv, pll);
248 		goto out;
249 	}
250 	drm_WARN_ON(&dev_priv->drm, pll->on);
251 
252 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
253 	pll->info->funcs->enable(dev_priv, pll);
254 	pll->on = true;
255 
256 out:
257 	mutex_unlock(&dev_priv->display.dpll.lock);
258 }
259 
260 /**
261  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
262  * @crtc_state: CRTC, and its state, which has a shared DPLL
263  *
264  * Disable the shared DPLL used by @crtc.
265  */
266 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
267 {
268 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
269 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
270 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
271 	unsigned int pipe_mask = BIT(crtc->pipe);
272 
273 	/* PCH only available on ILK+ */
274 	if (DISPLAY_VER(dev_priv) < 5)
275 		return;
276 
277 	if (pll == NULL)
278 		return;
279 
280 	mutex_lock(&dev_priv->display.dpll.lock);
281 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
282 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
283 		     crtc->base.base.id, crtc->base.name))
284 		goto out;
285 
286 	drm_dbg_kms(&dev_priv->drm,
287 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
288 		    pll->info->name, pll->active_mask, pll->on,
289 		    crtc->base.base.id, crtc->base.name);
290 
291 	assert_shared_dpll_enabled(dev_priv, pll);
292 	drm_WARN_ON(&dev_priv->drm, !pll->on);
293 
294 	pll->active_mask &= ~pipe_mask;
295 	if (pll->active_mask)
296 		goto out;
297 
298 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
299 	pll->info->funcs->disable(dev_priv, pll);
300 	pll->on = false;
301 
302 out:
303 	mutex_unlock(&dev_priv->display.dpll.lock);
304 }
305 
/*
 * Find a PLL within @dpll_mask that can drive @crtc with @pll_state:
 * prefer an in-use PLL whose programmed state matches exactly (sharing),
 * otherwise fall back to the first unused PLL. Returns NULL if neither
 * exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->display.dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Exact hw-state match allows sharing the PLL across pipes. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
354 
355 /**
356  * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
357  * @crtc: CRTC on which behalf the reference is taken
358  * @pll: DPLL for which the reference is taken
359  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
360  *
361  * Take a reference for @pll tracking the use of it by @crtc.
362  */
363 static void
364 intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
365 				 const struct intel_shared_dpll *pll,
366 				 struct intel_shared_dpll_state *shared_dpll_state)
367 {
368 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
369 
370 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0);
371 
372 	shared_dpll_state->pipe_mask |= BIT(crtc->pipe);
373 
374 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
375 		    crtc->base.base.id, crtc->base.name, pll->info->name);
376 }
377 
/*
 * Reserve @pll for @crtc in the atomic state. The first user also
 * records the hw state the PLL will be programmed with; later users
 * share that state (matched earlier via intel_find_shared_dpll()).
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* Only the first reference sets the hw state. */
	if (shared_dpll[id].pipe_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
}
394 
395 /**
396  * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
397  * @crtc: CRTC on which behalf the reference is dropped
398  * @pll: DPLL for which the reference is dropped
399  * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
400  *
401  * Drop a reference for @pll tracking the end of use of it by @crtc.
402  */
403 void
404 intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
405 				   const struct intel_shared_dpll *pll,
406 				   struct intel_shared_dpll_state *shared_dpll_state)
407 {
408 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
409 
410 	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
411 
412 	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);
413 
414 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
415 		    crtc->base.base.id, crtc->base.name, pll->info->name);
416 }
417 
418 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
419 					  const struct intel_crtc *crtc,
420 					  const struct intel_shared_dpll *pll)
421 {
422 	struct intel_shared_dpll_state *shared_dpll;
423 	const enum intel_dpll_id id = pll->info->id;
424 
425 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
426 
427 	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
428 }
429 
/*
 * Default .put_dplls() hook: clear the new state's PLL assignment and
 * release the reference held via the old state, if any.
 */
static void intel_put_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->shared_dpll = NULL;

	if (!old_crtc_state->shared_dpll)
		return;

	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
}
445 
446 /**
447  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
448  * @state: atomic state
449  *
450  * This is the dpll version of drm_atomic_helper_swap_state() since the
451  * helper does not handle driver-specific global state.
452  *
453  * For consistency with atomic helpers this function does a complete swap,
454  * i.e. it also puts the current state into @state, even though there is no
455  * need for that at this moment.
456  */
457 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
458 {
459 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
460 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
461 	enum intel_dpll_id i;
462 
463 	if (!state->dpll_set)
464 		return;
465 
466 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
467 		struct intel_shared_dpll *pll =
468 			&dev_priv->display.dpll.shared_dplls[i];
469 
470 		swap(pll->state, shared_dpll[i]);
471 	}
472 }
473 
/*
 * Read the PCH DPLL registers into @hw_state. Returns true if the PLL's
 * VCO is enabled, false if it is off or the display power well could not
 * be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while the display power is on. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
496 
/*
 * Warn if none of the PCH reference clock sources (SSC, non-spread,
 * superspread) is enabled; the PCH DPLLs depend on one of them.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(dev_priv, !enabled,
			"PCH refclk assertion failure, should be active but is disabled\n");
}
508 
/*
 * Program and enable a PCH DPLL from its cached hw state. Note the
 * exact register-write sequence: FP dividers first, DPLL enable, wait
 * for lock, then rewrite the DPLL register for the pixel multiplier.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
535 
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	/* Flush the write and give the PLL time to spin down. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
545 
/* IBX PCH DPLLs need no state precomputation; always succeeds. */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
552 
/*
 * Reserve a PCH DPLL for @crtc. On Ironlake (IBX) the PLL is fixed per
 * pipe; on later PCHs any of the two PCH PLLs can be shared or
 * allocated. Returns 0 on success, -EINVAL if no PLL is available.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->display.dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
590 
/* Log the IBX-relevant fields of a DPLL hw state for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
602 
/* Hooks for the IBX/CPT PCH DPLLs. No .get_freq on this platform. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
608 
609 static const struct dpll_info pch_plls[] = {
610 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
611 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
612 	{ },
613 };
614 
/* DPLL manager for PCH (ILK/IBX/CPT) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
622 
/* Program and enable a HSW/BDW WRPLL from its cached hw state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	/* Flush the write and give the PLL time to lock. */
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
632 
/* Program and enable the HSW/BDW SPLL from its cached hw state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	/* Flush the write and give the PLL time to lock. */
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
640 
/* Disable a WRPLL, then reconfigure the PCH refclk if this PLL used SSC. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
656 
/* Disable the SPLL, then reconfigure the PCH refclk if it used SSC. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
672 
/*
 * Read a WRPLL's control register into @hw_state. Returns true if the
 * PLL is enabled, false if it is off or display power is unavailable.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
693 
/*
 * Read the SPLL control register into @hw_state. Returns true if the
 * PLL is enabled, false if it is off or display power is unavailable.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
713 
/* LC PLL frequency (2700 presumably in MHz; LC_FREQ_2K in 2 kHz units). */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* WRPLL post divider search range; p is iterated in steps of P_INC. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL divider triple: post (p), feedback (n2), reference (r2). */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
730 
/*
 * Return the PPM error budget for a given pixel clock (in Hz). Standard
 * TV/display clocks get a zero budget (must be hit exactly); a few other
 * known rates get a relaxed budget; everything else defaults to 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{  25175000, 0 },    {  25200000, 0 },    {  27000000, 0 },
		{  27027000, 0 },    {  37762500, 0 },    {  37800000, 0 },
		{  40500000, 0 },    {  40541000, 0 },    {  54000000, 0 },
		{  54054000, 0 },    {  59341000, 0 },    {  59400000, 0 },
		{  72000000, 0 },    {  74176000, 0 },    {  74250000, 0 },
		{  81000000, 0 },    {  81081000, 0 },    {  89012000, 0 },
		{  89100000, 0 },    { 108000000, 0 },    { 108108000, 0 },
		{ 111264000, 0 },    { 111375000, 0 },    { 148352000, 0 },
		{ 148500000, 0 },    { 162000000, 0 },    { 162162000, 0 },
		{ 222525000, 0 },    { 222750000, 0 },    { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	size_t i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	/* Default budget for clocks not listed above. */
	return 1000;
}
794 
/*
 * Compare a candidate WRPLL divider set (r2, n2, p) against the current
 * best and update *best if the candidate is preferable under the PPM
 * budget rules described below. Comparisons are done multiplied through
 * to avoid division.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
854 
/*
 * Exhaustively search the (r2, n2, p) divider space for the combination
 * that best produces @clock within the PPM budget, returning the doubled
 * reference (r2), feedback (n2) and post (p) divider values.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* Target frequency in 2 kHz units, to match LC_FREQ_2K. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
919 
/*
 * Compute a WRPLL's output frequency (in kHz) from its control register
 * value, picking the reference clock according to the REF field.
 * Returns 0 on an unrecognized reference selection.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HASWELL_ULT(dev_priv)) {
			refclk = dev_priv->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the doubled reference, post and feedback dividers. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
959 
/*
 * Compute the WRPLL control value for the CRTC's port clock, then
 * overwrite port_clock with the frequency the chosen dividers actually
 * produce. Always succeeds.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	/* Report back the frequency the dividers actually achieve. */
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
981 
/* Find a WRPLL (1 or 2) to share or allocate for @crtc, or NULL. */
static struct intel_shared_dpll *
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	return intel_find_shared_dpll(state, crtc,
				      &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_WRPLL2) |
				      BIT(DPLL_ID_WRPLL1));
}
994 
995 static int
996 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
997 {
998 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
999 	int clock = crtc_state->port_clock;
1000 
1001 	switch (clock / 2) {
1002 	case 81000:
1003 	case 135000:
1004 	case 270000:
1005 		return 0;
1006 	default:
1007 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
1008 			    clock);
1009 		return -EINVAL;
1010 	}
1011 }
1012 
1013 static struct intel_shared_dpll *
1014 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1015 {
1016 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1017 	struct intel_shared_dpll *pll;
1018 	enum intel_dpll_id pll_id;
1019 	int clock = crtc_state->port_clock;
1020 
1021 	switch (clock / 2) {
1022 	case 81000:
1023 		pll_id = DPLL_ID_LCPLL_810;
1024 		break;
1025 	case 135000:
1026 		pll_id = DPLL_ID_LCPLL_1350;
1027 		break;
1028 	case 270000:
1029 		pll_id = DPLL_ID_LCPLL_2700;
1030 		break;
1031 	default:
1032 		MISSING_CASE(clock / 2);
1033 		return NULL;
1034 	}
1035 
1036 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1037 
1038 	if (!pll)
1039 		return NULL;
1040 
1041 	return pll;
1042 }
1043 
1044 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1045 				  const struct intel_shared_dpll *pll,
1046 				  const struct intel_dpll_hw_state *pll_state)
1047 {
1048 	int link_clock = 0;
1049 
1050 	switch (pll->info->id) {
1051 	case DPLL_ID_LCPLL_810:
1052 		link_clock = 81000;
1053 		break;
1054 	case DPLL_ID_LCPLL_1350:
1055 		link_clock = 135000;
1056 		break;
1057 	case DPLL_ID_LCPLL_2700:
1058 		link_clock = 270000;
1059 		break;
1060 	default:
1061 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1062 		break;
1063 	}
1064 
1065 	return link_clock * 2;
1066 }
1067 
1068 static int
1069 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1070 			  struct intel_crtc *crtc)
1071 {
1072 	struct intel_crtc_state *crtc_state =
1073 		intel_atomic_get_new_crtc_state(state, crtc);
1074 
1075 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1076 		return -EINVAL;
1077 
1078 	crtc_state->dpll_hw_state.spll =
1079 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1080 
1081 	return 0;
1082 }
1083 
1084 static struct intel_shared_dpll *
1085 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1086 		      struct intel_crtc *crtc)
1087 {
1088 	struct intel_crtc_state *crtc_state =
1089 		intel_atomic_get_new_crtc_state(state, crtc);
1090 
1091 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1092 				      BIT(DPLL_ID_SPLL));
1093 }
1094 
1095 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1096 				 const struct intel_shared_dpll *pll,
1097 				 const struct intel_dpll_hw_state *pll_state)
1098 {
1099 	int link_clock = 0;
1100 
1101 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1102 	case SPLL_FREQ_810MHz:
1103 		link_clock = 81000;
1104 		break;
1105 	case SPLL_FREQ_1350MHz:
1106 		link_clock = 135000;
1107 		break;
1108 	case SPLL_FREQ_2700MHz:
1109 		link_clock = 270000;
1110 		break;
1111 	default:
1112 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1113 		break;
1114 	}
1115 
1116 	return link_clock * 2;
1117 }
1118 
1119 static int hsw_compute_dpll(struct intel_atomic_state *state,
1120 			    struct intel_crtc *crtc,
1121 			    struct intel_encoder *encoder)
1122 {
1123 	struct intel_crtc_state *crtc_state =
1124 		intel_atomic_get_new_crtc_state(state, crtc);
1125 
1126 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1127 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1128 	else if (intel_crtc_has_dp_encoder(crtc_state))
1129 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1130 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1131 		return hsw_ddi_spll_compute_dpll(state, crtc);
1132 	else
1133 		return -EINVAL;
1134 }
1135 
1136 static int hsw_get_dpll(struct intel_atomic_state *state,
1137 			struct intel_crtc *crtc,
1138 			struct intel_encoder *encoder)
1139 {
1140 	struct intel_crtc_state *crtc_state =
1141 		intel_atomic_get_new_crtc_state(state, crtc);
1142 	struct intel_shared_dpll *pll = NULL;
1143 
1144 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1145 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1146 	else if (intel_crtc_has_dp_encoder(crtc_state))
1147 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1148 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1149 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1150 
1151 	if (!pll)
1152 		return -EINVAL;
1153 
1154 	intel_reference_shared_dpll(state, crtc,
1155 				    pll, &crtc_state->dpll_hw_state);
1156 
1157 	crtc_state->shared_dpll = pll;
1158 
1159 	return 0;
1160 }
1161 
1162 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1163 {
1164 	i915->display.dpll.ref_clks.ssc = 135000;
1165 	/* Non-SSC is only used on non-ULT HSW. */
1166 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1167 		i915->display.dpll.ref_clks.nssc = 24000;
1168 	else
1169 		i915->display.dpll.ref_clks.nssc = 135000;
1170 }
1171 
/* Dump the raw HSW PLL register words for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1178 
/* Hooks for the two HSW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

/* Hooks for the single HSW SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1192 
/*
 * The HSW LCPLLs are registered with INTEL_DPLL_ALWAYS_ON (see hsw_plls[]),
 * so enabling is a no-op.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1197 
/* Counterpart to hsw_ddi_lcpll_enable(): always-on PLL, nothing to do. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1202 
/* Always-on LCPLLs are unconditionally reported as enabled. */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1209 
/* Hooks for the fixed-frequency, always-on HSW LCPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1216 
/* All PLLs available on HSW/BDW; the LCPLLs are never turned off by us. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1226 
/* Top-level PLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1235 
/* Per-DPLL register set on SKL: control + the two HDMI config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1239 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2: reuses the HSW-era WRPLL control registers */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3: reuses the HSW-era WRPLL control registers */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1266 
/*
 * Program this PLL's fields (HDMI mode, SSC, link rate) in the shared
 * DPLL_CTRL1 register; each PLL owns a 6-bit field at bit position id * 6.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
		     pll->state.hw_state.ctrl1 << (id * 6));
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1277 
/*
 * Enable a SKL DPLL (1-3): program CTRL1 and the CFGCR registers before
 * setting the enable bit, then wait for the lock indication.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	/* Configuration must be fully written (and posted) before enabling. */
	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the PLL to report lock. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1297 
/*
 * DPLL0 is already running (it drives CDCLK, see skl_ddi_dpll0_get_hw_state),
 * so only its CTRL1 fields need programming.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1303 
/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1314 
/* DPLL0 must stay running (it drives CDCLK), so disable is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1319 
/*
 * Read back the hardware state of a SKL DPLL (1-3).
 *
 * Returns false when the display power domain is off or the PLL is
 * disabled; otherwise fills @hw_state and returns true.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's 6-bit field from the shared CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1356 
/*
 * Read back DPLL0's state. Unlike DPLL1-3 it is expected to always be
 * enabled, and it has no CFGCR registers (no HDMI mode).
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* Extract this PLL's 6-bit field from the shared CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1389 
/* Running best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6%  of the DCO central freq */
/* (deviation values below are in units of 0.01%) */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1400 
1401 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1402 				  u64 central_freq,
1403 				  u64 dco_freq,
1404 				  unsigned int divider)
1405 {
1406 	u64 deviation;
1407 
1408 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1409 			      central_freq);
1410 
1411 	/* positive deviation */
1412 	if (dco_freq >= central_freq) {
1413 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1414 		    deviation < ctx->min_deviation) {
1415 			ctx->min_deviation = deviation;
1416 			ctx->central_freq = central_freq;
1417 			ctx->dco_freq = dco_freq;
1418 			ctx->p = divider;
1419 		}
1420 	/* negative deviation */
1421 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1422 		   deviation < ctx->min_deviation) {
1423 		ctx->min_deviation = deviation;
1424 		ctx->central_freq = central_freq;
1425 		ctx->dco_freq = dco_freq;
1426 		ctx->p = divider;
1427 	}
1428 }
1429 
/*
 * Decompose the total post divider @p into the hardware's three-stage
 * divider (p0 * p1 * p2). Outputs are left untouched if @p has no valid
 * decomposition; the caller pre-initializes them.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* even dividers */
		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1478 
/* Register-field encodings for the SKL WRPLL CFGCR1/CFGCR2 registers. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional DCO multiplier */
	u32 dco_integer;	/* integer DCO multiplier */
	u32 qdiv_ratio;
	u32 qdiv_mode;		/* 0 = qdiv bypassed (ratio 1), 1 = enabled */
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;	/* encoded central frequency select */
};
1488 
/*
 * Encode the chosen DCO central frequency, post dividers (p0/p1/p2) and
 * resulting DCO frequency into the CFGCR register field values.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		/* last case, no break needed */
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	/* DCO frequency = AFE clock * total post divider. */
	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	/* ref_clock is in kHz; fraction is a 15-bit fixed-point remainder. */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1556 
/*
 * Find a WRPLL configuration for @clock: try every legal post divider
 * against each DCO central frequency, keeping the candidate with the
 * smallest deviation (even dividers preferred), then encode the result
 * into @wrpll_params. Returns 0 on success, -EINVAL if no divider fits.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1628 
/*
 * Decode the port clock (in kHz) from the WRPLL CFGCR1/CFGCR2 register
 * values: reconstruct the DCO frequency and divide by the post dividers
 * and the 5x AFE factor. Returns 0 for unrecognized divider encodings.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv only divides when its mode bit is set; otherwise ratio is 1. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/2^15) * ref clock. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1699 
/*
 * Compute the WRPLL configuration for HDMI and store it in the crtc
 * state (ctrl1/cfgcr1/cfgcr2). Also writes back the exact port clock
 * the chosen dividers produce. Returns 0 or a negative error code.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the divider search wants Hz. */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	/* Record the exact rate the dividers give us, not the requested one. */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1739 
/*
 * Encode the DP link rate into the ctrl1 field of the crtc state.
 * Rates not listed below leave the link rate field at zero.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1776 
1777 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1778 				  const struct intel_shared_dpll *pll,
1779 				  const struct intel_dpll_hw_state *pll_state)
1780 {
1781 	int link_clock = 0;
1782 
1783 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1784 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1785 	case DPLL_CTRL1_LINK_RATE_810:
1786 		link_clock = 81000;
1787 		break;
1788 	case DPLL_CTRL1_LINK_RATE_1080:
1789 		link_clock = 108000;
1790 		break;
1791 	case DPLL_CTRL1_LINK_RATE_1350:
1792 		link_clock = 135000;
1793 		break;
1794 	case DPLL_CTRL1_LINK_RATE_1620:
1795 		link_clock = 162000;
1796 		break;
1797 	case DPLL_CTRL1_LINK_RATE_2160:
1798 		link_clock = 216000;
1799 		break;
1800 	case DPLL_CTRL1_LINK_RATE_2700:
1801 		link_clock = 270000;
1802 		break;
1803 	default:
1804 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1805 		break;
1806 	}
1807 
1808 	return link_clock * 2;
1809 }
1810 
1811 static int skl_compute_dpll(struct intel_atomic_state *state,
1812 			    struct intel_crtc *crtc,
1813 			    struct intel_encoder *encoder)
1814 {
1815 	struct intel_crtc_state *crtc_state =
1816 		intel_atomic_get_new_crtc_state(state, crtc);
1817 
1818 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1819 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1820 	else if (intel_crtc_has_dp_encoder(crtc_state))
1821 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1822 	else
1823 		return -EINVAL;
1824 }
1825 
1826 static int skl_get_dpll(struct intel_atomic_state *state,
1827 			struct intel_crtc *crtc,
1828 			struct intel_encoder *encoder)
1829 {
1830 	struct intel_crtc_state *crtc_state =
1831 		intel_atomic_get_new_crtc_state(state, crtc);
1832 	struct intel_shared_dpll *pll;
1833 
1834 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1835 		pll = intel_find_shared_dpll(state, crtc,
1836 					     &crtc_state->dpll_hw_state,
1837 					     BIT(DPLL_ID_SKL_DPLL0));
1838 	else
1839 		pll = intel_find_shared_dpll(state, crtc,
1840 					     &crtc_state->dpll_hw_state,
1841 					     BIT(DPLL_ID_SKL_DPLL3) |
1842 					     BIT(DPLL_ID_SKL_DPLL2) |
1843 					     BIT(DPLL_ID_SKL_DPLL1));
1844 	if (!pll)
1845 		return -EINVAL;
1846 
1847 	intel_reference_shared_dpll(state, crtc,
1848 				    pll, &crtc_state->dpll_hw_state);
1849 
1850 	crtc_state->shared_dpll = pll;
1851 
1852 	return 0;
1853 }
1854 
1855 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1856 				const struct intel_shared_dpll *pll,
1857 				const struct intel_dpll_hw_state *pll_state)
1858 {
1859 	/*
1860 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1861 	 * the internal shift for each field
1862 	 */
1863 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1864 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1865 	else
1866 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1867 }
1868 
/* SKL PLLs run off the CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
1874 
/* Dump the raw SKL PLL register words for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1884 
/* Hooks for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* Hooks for SKL DPLL0, which stays enabled because it drives CDCLK. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1898 
/* All PLLs available on SKL; DPLL0 is always on (it drives CDCLK). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1906 
/* Top-level PLL manager for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1915 
/*
 * Program and lock a BXT/GLK port PLL (PLLs map 1:1 to ports): power up
 * (GLK only), write dividers and loop coefficients, trigger a
 * recalibration, then set the enable bit and wait for lock. The register
 * write order follows the hardware programming sequence and must not be
 * rearranged.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	/* GLK requires an explicit PLL power-up step before programming. */
	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);

	/* Write M2 integer */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);

	/* Write N */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);

	/* Write M2 fraction */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);

	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	/* 200 us timeout for the PLL to report lock. */
	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2016 
/*
 * Disable a BXT/GLK port PLL: clear the enable bit, and on GLK also
 * power the PLL back down.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		/* 200 us timeout for the power-down acknowledgement. */
		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2035 
/*
 * Read back the current hardware state of a BXT/GLK port PLL into
 * @hw_state. Returns true if the PLL is enabled. Each register value is
 * masked down to just the bits programmed by the software state
 * computation, so the result can be compared against a computed state.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Bail out if the display core power well cannot be acquired. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	/* Mask each register to the fields we actually program. */
	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2115 
/*
 * Pre-calculated PLL dividers for the standard DP link rates.
 * Entry [0] (162000 kHz, RBR) doubles as the fallback when a requested
 * rate has no exact match (see bxt_ddi_dp_pll_dividers()).
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2127 
2128 static int
2129 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2130 			  struct dpll *clk_div)
2131 {
2132 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2133 
2134 	/* Calculate HDMI div */
2135 	/*
2136 	 * FIXME: tie the following calculation into
2137 	 * i9xx_crtc_compute_clock
2138 	 */
2139 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2140 		return -EINVAL;
2141 
2142 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2143 
2144 	return 0;
2145 }
2146 
2147 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2148 				    struct dpll *clk_div)
2149 {
2150 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2151 	int i;
2152 
2153 	*clk_div = bxt_dp_clk_val[0];
2154 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2155 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2156 			*clk_div = bxt_dp_clk_val[i];
2157 			break;
2158 		}
2159 	}
2160 
2161 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2162 
2163 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2164 		    clk_div->dot != crtc_state->port_clock);
2165 }
2166 
/*
 * Translate @clk_div into BXT port PLL register values (loop filter
 * coefficients, dividers, lane staggering) and store them in
 * crtc_state->dpll_hw_state. Returns -EINVAL when the VCO frequency
 * falls outside all supported ranges.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/* Coefficient set is selected by the VCO frequency range. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		/* exactly 5.4 GHz is deliberately excluded from both ranges above */
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane stagger delay scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	/* m2 is .22 fixed point: integer part in pll0, fraction in pll2. */
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2234 
2235 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2236 				const struct intel_shared_dpll *pll,
2237 				const struct intel_dpll_hw_state *pll_state)
2238 {
2239 	struct dpll clock;
2240 
2241 	clock.m1 = 2;
2242 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2243 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2244 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2245 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2246 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2247 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2248 
2249 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2250 }
2251 
/*
 * Fill crtc_state->dpll_hw_state for a DP output. The divider lookup
 * always produces a usable set (it falls back to the first table entry),
 * so only the hw state computation itself can fail here.
 */
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct dpll clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2261 
2262 static int
2263 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2264 {
2265 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2266 	struct dpll clk_div = {};
2267 	int ret;
2268 
2269 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2270 
2271 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2272 	if (ret)
2273 		return ret;
2274 
2275 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2276 						      &crtc_state->dpll_hw_state);
2277 
2278 	return 0;
2279 }
2280 
2281 static int bxt_compute_dpll(struct intel_atomic_state *state,
2282 			    struct intel_crtc *crtc,
2283 			    struct intel_encoder *encoder)
2284 {
2285 	struct intel_crtc_state *crtc_state =
2286 		intel_atomic_get_new_crtc_state(state, crtc);
2287 
2288 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2289 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2290 	else if (intel_crtc_has_dp_encoder(crtc_state))
2291 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2292 	else
2293 		return -EINVAL;
2294 }
2295 
/*
 * "Get" the DPLL for @crtc. BXT has a fixed 1:1 mapping between DDI
 * ports and PLLs, so this is just a lookup by the encoder's port plus
 * taking a reference for this CRTC.
 */
static int bxt_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
2320 
2321 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2322 {
2323 	i915->display.dpll.ref_clks.ssc = 100000;
2324 	i915->display.dpll.ref_clks.nssc = 100000;
2325 	/* DSI non-SSC ref 19.2MHz */
2326 }
2327 
2328 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2329 			      const struct intel_dpll_hw_state *hw_state)
2330 {
2331 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2332 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2333 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2334 		    hw_state->ebb0,
2335 		    hw_state->ebb4,
2336 		    hw_state->pll0,
2337 		    hw_state->pll1,
2338 		    hw_state->pll2,
2339 		    hw_state->pll3,
2340 		    hw_state->pll6,
2341 		    hw_state->pll8,
2342 		    hw_state->pll9,
2343 		    hw_state->pll10,
2344 		    hw_state->pcsdw12);
2345 }
2346 
/* Hardware access vtable for the BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2353 
/* One PLL per DDI port (A/B/C); the list is terminated by an empty entry. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2360 
/* Top-level DPLL manager hooks for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2369 
/*
 * Factor the overall WRPLL divider @bestdiv into the hardware's
 * P * Q * K divider triple. Only the divider values produced by
 * icl_calc_wrpll()'s candidate list are expected here; the even-divider
 * cascade order (4, 6, 5, 14) is significant for values matching more
 * than one case.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			/* 3, 5, 7 map directly to P */
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			/* 9, 15, 21: split a factor of 3 into K */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2408 
/*
 * Encode the raw pdiv/qdiv/kdiv divider values and the DCO frequency
 * into the register-format fields of @params. K and P use a hardware
 * encoding (hence the switch statements); the DCO ratio is stored as a
 * 15-bit integer plus 15-bit fraction of dco_freq / ref_freq.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* A qdiv ratio other than 1 is only valid together with kdiv == 2. */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* dco_freq / ref_freq in 15.15 fixed point */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2456 
2457 /*
2458  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2459  * Program half of the nominal DCO divider fraction value.
2460  */
2461 static bool
2462 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2463 {
2464 	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
2465 		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2466 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2467 		 i915->display.dpll.ref_clks.nssc == 38400;
2468 }
2469 
/* Pre-computed combo PHY PLL settings keyed by DP link rate (kHz). */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2474 
2475 /*
2476  * These values alrea already adjusted: they're the bits we write to the
2477  * registers, not the logical values.
2478  */
2479 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2480 	{ 540000,
2481 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2482 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2483 	{ 270000,
2484 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2485 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2486 	{ 162000,
2487 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2488 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2489 	{ 324000,
2490 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2491 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2492 	{ 216000,
2493 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2494 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2495 	{ 432000,
2496 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2497 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2498 	{ 648000,
2499 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2500 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2501 	{ 810000,
2502 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2503 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2504 };
2505 
2506 
/* Also used for 38.4 MHz values (the DPLL divides a 38.4 MHz reference by 2). */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2534 
/* Thunderbolt PLL settings; ICL and TGL variants, per reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2555 
2556 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2557 				 struct skl_wrpll_params *pll_params)
2558 {
2559 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2560 	const struct icl_combo_pll_params *params =
2561 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2562 		icl_dp_combo_pll_24MHz_values :
2563 		icl_dp_combo_pll_19_2MHz_values;
2564 	int clock = crtc_state->port_clock;
2565 	int i;
2566 
2567 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2568 		if (clock == params[i].clock) {
2569 			*pll_params = params[i].wrpll;
2570 			return 0;
2571 		}
2572 	}
2573 
2574 	MISSING_CASE(clock);
2575 	return -EINVAL;
2576 }
2577 
2578 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2579 			    struct skl_wrpll_params *pll_params)
2580 {
2581 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2582 
2583 	if (DISPLAY_VER(dev_priv) >= 12) {
2584 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2585 		default:
2586 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2587 			fallthrough;
2588 		case 19200:
2589 		case 38400:
2590 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2591 			break;
2592 		case 24000:
2593 			*pll_params = tgl_tbt_pll_24MHz_values;
2594 			break;
2595 		}
2596 	} else {
2597 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2598 		default:
2599 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2600 			fallthrough;
2601 		case 19200:
2602 		case 38400:
2603 			*pll_params = icl_tbt_pll_19_2MHz_values;
2604 			break;
2605 		case 24000:
2606 			*pll_params = icl_tbt_pll_24MHz_values;
2607 			break;
2608 		}
2609 	}
2610 
2611 	return 0;
2612 }
2613 
/*
 * There is no single "frequency" for the TBT PLL; warn if anyone asks
 * and report 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2626 
2627 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2628 {
2629 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2630 
2631 	/*
2632 	 * For ICL+, the spec states: if reference frequency is 38.4,
2633 	 * use 19.2 because the DPLL automatically divides that by 2.
2634 	 */
2635 	if (ref_clock == 38400)
2636 		ref_clock = 19200;
2637 
2638 	return ref_clock;
2639 }
2640 
2641 static int
2642 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2643 	       struct skl_wrpll_params *wrpll_params)
2644 {
2645 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2646 	int ref_clock = icl_wrpll_ref_clock(i915);
2647 	u32 afe_clock = crtc_state->port_clock * 5;
2648 	u32 dco_min = 7998000;
2649 	u32 dco_max = 10000000;
2650 	u32 dco_mid = (dco_min + dco_max) / 2;
2651 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2652 					 18, 20, 24, 28, 30, 32,  36,  40,
2653 					 42, 44, 48, 50, 52, 54,  56,  60,
2654 					 64, 66, 68, 70, 72, 76,  78,  80,
2655 					 84, 88, 90, 92, 96, 98, 100, 102,
2656 					  3,  5,  7,  9, 15, 21 };
2657 	u32 dco, best_dco = 0, dco_centrality = 0;
2658 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2659 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2660 
2661 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2662 		dco = afe_clock * dividers[d];
2663 
2664 		if (dco <= dco_max && dco >= dco_min) {
2665 			dco_centrality = abs(dco - dco_mid);
2666 
2667 			if (dco_centrality < best_dco_centrality) {
2668 				best_dco_centrality = dco_centrality;
2669 				best_div = dividers[d];
2670 				best_dco = dco;
2671 			}
2672 		}
2673 	}
2674 
2675 	if (best_div == 0)
2676 		return -EINVAL;
2677 
2678 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2679 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2680 				  pdiv, qdiv, kdiv);
2681 
2682 	return 0;
2683 }
2684 
/*
 * Compute the port clock (kHz) from the combo PLL CFGCR register values:
 * dco_freq / (5 * p0 * p1 * p2), where the DCO fraction contributes in
 * 1/2^15 units of the reference clock.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider is only in effect when QDIV_MODE is set. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Decode the PDIV hardware encoding into the logical divider. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* Decode the KDIV hardware encoding into the logical divider. */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the Display WA #22010492432 halving done at programming time. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* A zero divider would mean an unprogrammed or corrupted register. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2745 
/*
 * Translate the computed WRPLL parameters into combo PHY DPLL
 * CFGCR0/CFGCR1 (and optionally DIV0) register values in @pll_state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/* Display WA #22010492432: program half the DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* VBT may override the AFC startup value. */
	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
2771 
/*
 * Find a DCO target frequency and HSDIV/DSDIV divider pair for the
 * MG/DKL PHY PLL. On success fills *target_dco_khz and the refclkin /
 * clktop fields of @state and returns 0; returns -EINVAL if no divider
 * combination puts the DCO in range.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP pins the DCO at exactly 8.1 GHz; HDMI allows a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Encode div1 into the HSDIV register field. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2847 
2848 /*
2849  * The specification for this function uses real numbers, so the math had to be
2850  * adapted to integer-only calculation, that's why it looks so different.
2851  */
2852 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2853 				 struct intel_dpll_hw_state *pll_state)
2854 {
2855 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2856 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2857 	int clock = crtc_state->port_clock;
2858 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2859 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2860 	u32 prop_coeff, int_coeff;
2861 	u32 tdc_targetcnt, feedfwgain;
2862 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2863 	u64 tmp;
2864 	bool use_ssc = false;
2865 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2866 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2867 	int ret;
2868 
2869 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2870 				       pll_state, is_dkl);
2871 	if (ret)
2872 		return ret;
2873 
2874 	m1div = 2;
2875 	m2div_int = dco_khz / (refclk_khz * m1div);
2876 	if (m2div_int > 255) {
2877 		if (!is_dkl) {
2878 			m1div = 4;
2879 			m2div_int = dco_khz / (refclk_khz * m1div);
2880 		}
2881 
2882 		if (m2div_int > 255)
2883 			return -EINVAL;
2884 	}
2885 	m2div_rem = dco_khz % (refclk_khz * m1div);
2886 
2887 	tmp = (u64)m2div_rem * (1 << 22);
2888 	do_div(tmp, refclk_khz * m1div);
2889 	m2div_frac = tmp;
2890 
2891 	switch (refclk_khz) {
2892 	case 19200:
2893 		iref_ndiv = 1;
2894 		iref_trim = 28;
2895 		iref_pulse_w = 1;
2896 		break;
2897 	case 24000:
2898 		iref_ndiv = 1;
2899 		iref_trim = 25;
2900 		iref_pulse_w = 2;
2901 		break;
2902 	case 38400:
2903 		iref_ndiv = 2;
2904 		iref_trim = 28;
2905 		iref_pulse_w = 1;
2906 		break;
2907 	default:
2908 		MISSING_CASE(refclk_khz);
2909 		return -EINVAL;
2910 	}
2911 
2912 	/*
2913 	 * tdc_res = 0.000003
2914 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2915 	 *
2916 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2917 	 * was supposed to be a division, but we rearranged the operations of
2918 	 * the formula to avoid early divisions so we don't multiply the
2919 	 * rounding errors.
2920 	 *
2921 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2922 	 * we also rearrange to work with integers.
2923 	 *
2924 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2925 	 * last division by 10.
2926 	 */
2927 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2928 
2929 	/*
2930 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2931 	 * 32 bits. That's not a problem since we round the division down
2932 	 * anyway.
2933 	 */
2934 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2935 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2936 
2937 	if (dco_khz >= 9000000) {
2938 		prop_coeff = 5;
2939 		int_coeff = 10;
2940 	} else {
2941 		prop_coeff = 4;
2942 		int_coeff = 8;
2943 	}
2944 
2945 	if (use_ssc) {
2946 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2947 		do_div(tmp, refclk_khz * m1div * 10000);
2948 		ssc_stepsize = tmp;
2949 
2950 		tmp = mul_u32_u32(dco_khz, 1000);
2951 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2952 	} else {
2953 		ssc_stepsize = 0;
2954 		ssc_steplen = 0;
2955 	}
2956 	ssc_steplog = 4;
2957 
2958 	/* write pll_state calculations */
2959 	if (is_dkl) {
2960 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2961 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2962 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2963 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2964 		if (dev_priv->display.vbt.override_afc_startup) {
2965 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2966 
2967 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2968 		}
2969 
2970 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2971 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2972 
2973 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2974 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2975 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2976 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2977 
2978 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2979 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2980 
2981 		pll_state->mg_pll_tdc_coldst_bias =
2982 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2983 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2984 
2985 	} else {
2986 		pll_state->mg_pll_div0 =
2987 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2988 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2989 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2990 
2991 		pll_state->mg_pll_div1 =
2992 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2993 			MG_PLL_DIV1_DITHER_DIV_2 |
2994 			MG_PLL_DIV1_NDIVRATIO(1) |
2995 			MG_PLL_DIV1_FBPREDIV(m1div);
2996 
2997 		pll_state->mg_pll_lf =
2998 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2999 			MG_PLL_LF_AFCCNTSEL_512 |
3000 			MG_PLL_LF_GAINCTRL(1) |
3001 			MG_PLL_LF_INT_COEFF(int_coeff) |
3002 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3003 
3004 		pll_state->mg_pll_frac_lock =
3005 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3006 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3007 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3008 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3009 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3010 		if (use_ssc || m2div_rem > 0)
3011 			pll_state->mg_pll_frac_lock |=
3012 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3013 
3014 		pll_state->mg_pll_ssc =
3015 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3016 			MG_PLL_SSC_TYPE(2) |
3017 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3018 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3019 			MG_PLL_SSC_FLLEN |
3020 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3021 
3022 		pll_state->mg_pll_tdc_coldst_bias =
3023 			MG_PLL_TDC_COLDST_COLDSTART |
3024 			MG_PLL_TDC_COLDST_IREFINT_EN |
3025 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3026 			MG_PLL_TDC_TDCOVCCORR_EN |
3027 			MG_PLL_TDC_TDCSEL(3);
3028 
3029 		pll_state->mg_pll_bias =
3030 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3031 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3032 			MG_PLL_BIAS_BIAS_BONUS(10) |
3033 			MG_PLL_BIAS_BIASCAL_EN |
3034 			MG_PLL_BIAS_CTRIM(12) |
3035 			MG_PLL_BIAS_VREF_RDAC(4) |
3036 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3037 
3038 		if (refclk_khz == 38400) {
3039 			pll_state->mg_pll_tdc_coldst_bias_mask =
3040 				MG_PLL_TDC_COLDST_COLDSTART;
3041 			pll_state->mg_pll_bias_mask = 0;
3042 		} else {
3043 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3044 			pll_state->mg_pll_bias_mask = -1U;
3045 		}
3046 
3047 		pll_state->mg_pll_tdc_coldst_bias &=
3048 			pll_state->mg_pll_tdc_coldst_bias_mask;
3049 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3050 	}
3051 
3052 	return 0;
3053 }
3054 
/*
 * Calculate the port clock (in kHz) produced by an MG (ICL) or Dekel (TGL+)
 * PHY PLL from the register values cached in @pll_state. Returns 0 if the
 * HSDIV ratio field holds an unexpected value.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->display.dpll.ref_clks.nssc;

	/*
	 * Extract the feedback dividers; the register layout differs between
	 * Dekel (display version >= 12) and MG PHYs.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* The fractional divider only contributes when enabled. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		/* The fractional divider only contributes when enabled. */
		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the HSDIV post divider. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	/* Decode the DSDIV post divider. */
	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3126 
3127 /**
3128  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3129  * @crtc_state: state for the CRTC to select the DPLL for
3130  * @port_dpll_id: the active @port_dpll_id to select
3131  *
3132  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3133  * CRTC.
3134  */
3135 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3136 			      enum icl_port_dpll_id port_dpll_id)
3137 {
3138 	struct icl_port_dpll *port_dpll =
3139 		&crtc_state->icl_port_dplls[port_dpll_id];
3140 
3141 	crtc_state->shared_dpll = port_dpll->pll;
3142 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3143 }
3144 
3145 static void icl_update_active_dpll(struct intel_atomic_state *state,
3146 				   struct intel_crtc *crtc,
3147 				   struct intel_encoder *encoder)
3148 {
3149 	struct intel_crtc_state *crtc_state =
3150 		intel_atomic_get_new_crtc_state(state, crtc);
3151 	struct intel_digital_port *primary_port;
3152 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3153 
3154 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3155 		enc_to_mst(encoder)->primary :
3156 		enc_to_dig_port(encoder);
3157 
3158 	if (primary_port &&
3159 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3160 	     intel_tc_port_in_legacy_mode(primary_port)))
3161 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3162 
3163 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3164 }
3165 
3166 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3167 				      struct intel_crtc *crtc)
3168 {
3169 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3170 	struct intel_crtc_state *crtc_state =
3171 		intel_atomic_get_new_crtc_state(state, crtc);
3172 	struct icl_port_dpll *port_dpll =
3173 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3174 	struct skl_wrpll_params pll_params = {};
3175 	int ret;
3176 
3177 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3178 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3179 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3180 	else
3181 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3182 
3183 	if (ret)
3184 		return ret;
3185 
3186 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3187 
3188 	/* this is mainly for the fastset check */
3189 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3190 
3191 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3192 							    &port_dpll->hw_state);
3193 
3194 	return 0;
3195 }
3196 
/*
 * Reserve a combo PHY DPLL for @crtc from the platform (and port) specific
 * pool of DPLLs, excluding any reserved by HTI. Returns -EINVAL if no
 * matching shared DPLL is available.
 */
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum port port = encoder->port;
	unsigned long dpll_mask;

	/* Pick the candidate DPLL pool based on platform and port. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* DG1 pairs DPLLs with specific ports. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
				port != PORT_A) {
		/* On JSL/EHL DPLL4 is available to all ports except port A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_hti_dpll_mask(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;
}
3256 
3257 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3258 				    struct intel_crtc *crtc)
3259 {
3260 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3261 	struct intel_crtc_state *crtc_state =
3262 		intel_atomic_get_new_crtc_state(state, crtc);
3263 	struct icl_port_dpll *port_dpll =
3264 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3265 	struct skl_wrpll_params pll_params = {};
3266 	int ret;
3267 
3268 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3269 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3270 	if (ret)
3271 		return ret;
3272 
3273 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3274 
3275 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3276 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3277 	if (ret)
3278 		return ret;
3279 
3280 	/* this is mainly for the fastset check */
3281 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3282 
3283 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3284 							 &port_dpll->hw_state);
3285 
3286 	return 0;
3287 }
3288 
3289 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3290 				struct intel_crtc *crtc,
3291 				struct intel_encoder *encoder)
3292 {
3293 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3294 	struct intel_crtc_state *crtc_state =
3295 		intel_atomic_get_new_crtc_state(state, crtc);
3296 	struct icl_port_dpll *port_dpll =
3297 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3298 	enum intel_dpll_id dpll_id;
3299 	int ret;
3300 
3301 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3302 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3303 						&port_dpll->hw_state,
3304 						BIT(DPLL_ID_ICL_TBTPLL));
3305 	if (!port_dpll->pll)
3306 		return -EINVAL;
3307 	intel_reference_shared_dpll(state, crtc,
3308 				    port_dpll->pll, &port_dpll->hw_state);
3309 
3310 
3311 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3312 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3313 							 encoder->port));
3314 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3315 						&port_dpll->hw_state,
3316 						BIT(dpll_id));
3317 	if (!port_dpll->pll) {
3318 		ret = -EINVAL;
3319 		goto err_unreference_tbt_pll;
3320 	}
3321 	intel_reference_shared_dpll(state, crtc,
3322 				    port_dpll->pll, &port_dpll->hw_state);
3323 
3324 	icl_update_active_dpll(state, crtc, encoder);
3325 
3326 	return 0;
3327 
3328 err_unreference_tbt_pll:
3329 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3330 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3331 
3332 	return ret;
3333 }
3334 
3335 static int icl_compute_dplls(struct intel_atomic_state *state,
3336 			     struct intel_crtc *crtc,
3337 			     struct intel_encoder *encoder)
3338 {
3339 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3340 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3341 
3342 	if (intel_phy_is_combo(dev_priv, phy))
3343 		return icl_compute_combo_phy_dpll(state, crtc);
3344 	else if (intel_phy_is_tc(dev_priv, phy))
3345 		return icl_compute_tc_phy_dplls(state, crtc);
3346 
3347 	MISSING_CASE(phy);
3348 
3349 	return 0;
3350 }
3351 
3352 static int icl_get_dplls(struct intel_atomic_state *state,
3353 			 struct intel_crtc *crtc,
3354 			 struct intel_encoder *encoder)
3355 {
3356 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3357 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3358 
3359 	if (intel_phy_is_combo(dev_priv, phy))
3360 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3361 	else if (intel_phy_is_tc(dev_priv, phy))
3362 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3363 
3364 	MISSING_CASE(phy);
3365 
3366 	return -EINVAL;
3367 }
3368 
3369 static void icl_put_dplls(struct intel_atomic_state *state,
3370 			  struct intel_crtc *crtc)
3371 {
3372 	const struct intel_crtc_state *old_crtc_state =
3373 		intel_atomic_get_old_crtc_state(state, crtc);
3374 	struct intel_crtc_state *new_crtc_state =
3375 		intel_atomic_get_new_crtc_state(state, crtc);
3376 	enum icl_port_dpll_id id;
3377 
3378 	new_crtc_state->shared_dpll = NULL;
3379 
3380 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3381 		const struct icl_port_dpll *old_port_dpll =
3382 			&old_crtc_state->icl_port_dplls[id];
3383 		struct icl_port_dpll *new_port_dpll =
3384 			&new_crtc_state->icl_port_dplls[id];
3385 
3386 		new_port_dpll->pll = NULL;
3387 
3388 		if (!old_port_dpll->pll)
3389 			continue;
3390 
3391 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3392 	}
3393 }
3394 
/*
 * Read out the MG PHY PLL state into @hw_state. Returns false if display
 * power can't be acquired or the PLL is disabled. Only the register bits
 * the driver programs are kept, so the result can be compared against a
 * computed state.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * With a 38.4MHz refclk only COLDSTART is compared and the bias
	 * register not at all; this matches the masking done in
	 * icl_calc_mg_pll_state().
	 */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3460 
/*
 * Read out the Dekel PHY PLL state into @hw_state. Returns false if display
 * power can't be acquired or the PLL is disabled. Only the register bits
 * the driver programs are kept, so the result can be compared against a
 * computed state.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* The AFC startup bits are only compared when the VBT overrides them. */
	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3531 
/*
 * Read out a combo/TBT PLL's CFGCR registers into @hw_state, using the
 * platform specific register layout. Returns false if display power can't
 * be acquired or the PLL (per @enable_reg) is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* The CFGCR register offsets differ per platform. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only compared when the VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* On JSL/EHL DPLL4 uses the register instance at index 4. */
		if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3591 
3592 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3593 				   struct intel_shared_dpll *pll,
3594 				   struct intel_dpll_hw_state *hw_state)
3595 {
3596 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3597 
3598 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3599 }
3600 
/*
 * Read out the TBT PLL's state; the TBT PLL uses the fixed TBT_PLL_ENABLE
 * register rather than a per-PLL one.
 */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3607 
/*
 * Program a combo/TBT PLL's configuration registers (CFGCR0/1 and, on
 * TGL+, the AFC startup bits of DIV0 when the VBT overrides them) from the
 * cached hw_state, using the platform specific register layout.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		/* Only TGL+ has a DIV0 register with AFC startup bits. */
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* On JSL/EHL DPLL4 uses the register instance at index 4. */
		if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* Warn if the VBT wants an AFC override on a platform without DIV0. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3649 
/*
 * Program the MG PHY PLL registers for @pll's TC port from the cached
 * hw_state.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	/* These registers are written in full; no reserved fields to keep. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3692 
/*
 * Program the Dekel PHY PLL registers for @pll's TC port from the cached
 * hw_state. Every register is updated read-modify-write so reserved fields
 * keep their current value.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* Only touch the AFC startup bits when the VBT overrides them. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3757 
/*
 * Power up @pll via @enable_reg and wait for the hardware to report the
 * power state; logs an error on timeout.
 */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3772 
/*
 * Enable @pll via @enable_reg and wait for it to report lock; logs an
 * error on timeout.
 */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
3783 
/*
 * Apply Wa_16011069516: disable CMTG (DPT) clock gating on ADL-P A0
 * steppings, right after DPLL0 has been enabled. No-op elsewhere.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only applicable on ADL-P display stepping A0 and only via DPLL0. */
	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* The first read is discarded; the rmw returns the second read. */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3807 
/*
 * Full enable sequence for a combo PHY PLL: power up, program the PLL
 * registers, then enable and wait for lock. On JSL/EHL DPLL4 additionally
 * holds a DC-off power reference for as long as the PLL is enabled.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3841 
/*
 * Full enable sequence for the TBT PLL: power up, program the PLL
 * registers, then enable and wait for lock.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3859 
/*
 * Full enable sequence for an MG/Dekel PHY PLL: power up, program the
 * Dekel (display version >= 12) or MG PHY registers, then enable and wait
 * for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3882 
/*
 * Common ICL+ PLL disable sequence: clear the enable bit, wait for the
 * PLL to report unlocked, then power the PLL down and wait for the power
 * state to clear. @enable_reg selects which PLL instance is affected.
 * Failed waits are logged but not treated as fatal.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3913 
/*
 * Disable a combo PHY PLL. On JSL/EHL, DPLL4 additionally holds a
 * POWER_DOMAIN_DC_OFF wakeref while enabled (taken in combo_pll_enable()
 * / readout_dpll_hw_state()); drop it only after the PLL is disabled.
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
					pll->wakeref);
}
3926 
/* Disable the Thunderbolt PLL via the common ICL disable sequence. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3932 
/*
 * Disable an MG/DKL PHY PLL via the common ICL disable sequence, using
 * the per-Type-C-port enable register.
 */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, intel_tc_pll_enable_reg(dev_priv, pll));
}
3940 
/*
 * ICL+ DPLLs run off the non-SSC reference; mirror the reference clock
 * value read out by the cdclk code into the DPLL manager's ref_clks.
 */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3946 
3947 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3948 			      const struct intel_dpll_hw_state *hw_state)
3949 {
3950 	drm_dbg_kms(&dev_priv->drm,
3951 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3952 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3953 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3954 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3955 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3956 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3957 		    hw_state->cfgcr0, hw_state->cfgcr1,
3958 		    hw_state->div0,
3959 		    hw_state->mg_refclkin_ctl,
3960 		    hw_state->mg_clktop2_coreclkctl1,
3961 		    hw_state->mg_clktop2_hsclkctl,
3962 		    hw_state->mg_pll_div0,
3963 		    hw_state->mg_pll_div1,
3964 		    hw_state->mg_pll_lf,
3965 		    hw_state->mg_pll_frac_lock,
3966 		    hw_state->mg_pll_ssc,
3967 		    hw_state->mg_pll_bias,
3968 		    hw_state->mg_pll_tdc_coldst_bias);
3969 }
3970 
/* Hooks for the ICL+ combo PHY PLLs (also used by the DG1/RKL/ADL tables below) */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Hooks for the Thunderbolt PLL */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Hooks for the ICL MG PHY (Type-C) PLLs */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3991 
/* ICL: two combo DPLLs, the TBT PLL and four MG PHY (Type-C) PLLs */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4012 
/* EHL/JSL: combo PHY PLLs only; DPLL4 has special DC-off handling, see combo_pll_enable() */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* No .update_active_dpll: EHL/JSL has no Type-C ports needing it */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4028 
/*
 * Hooks for the TGL+ Dekel (DKL) PHY PLLs; enable/disable are shared
 * with the MG PHY path, which branches on DISPLAY_VER internally.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4035 
/* TGL: two combo DPLLs, the TBT PLL and six Dekel PHY Type-C PLLs */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4058 
/* RKL: combo PHY PLLs only; the third PLL reuses the EHL DPLL4 id */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4074 
/* DG1: four combo PHY PLLs */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4091 
/* ADL-S: four combo PHY PLLs; PLLs 2/3 reuse the DG1 ids */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4108 
/* ADL-P: two combo DPLLs, the TBT PLL and four Dekel PHY Type-C PLLs */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4129 
4130 /**
4131  * intel_shared_dpll_init - Initialize shared DPLLs
4132  * @dev_priv: i915 device
4133  *
4134  * Initialize shared DPLLs for @dev_priv.
4135  */
4136 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4137 {
4138 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4139 	const struct dpll_info *dpll_info;
4140 	int i;
4141 
4142 	mutex_init(&dev_priv->display.dpll.lock);
4143 
4144 	if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
4145 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4146 		dpll_mgr = NULL;
4147 	else if (IS_ALDERLAKE_P(dev_priv))
4148 		dpll_mgr = &adlp_pll_mgr;
4149 	else if (IS_ALDERLAKE_S(dev_priv))
4150 		dpll_mgr = &adls_pll_mgr;
4151 	else if (IS_DG1(dev_priv))
4152 		dpll_mgr = &dg1_pll_mgr;
4153 	else if (IS_ROCKETLAKE(dev_priv))
4154 		dpll_mgr = &rkl_pll_mgr;
4155 	else if (DISPLAY_VER(dev_priv) >= 12)
4156 		dpll_mgr = &tgl_pll_mgr;
4157 	else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
4158 		dpll_mgr = &ehl_pll_mgr;
4159 	else if (DISPLAY_VER(dev_priv) >= 11)
4160 		dpll_mgr = &icl_pll_mgr;
4161 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4162 		dpll_mgr = &bxt_pll_mgr;
4163 	else if (DISPLAY_VER(dev_priv) == 9)
4164 		dpll_mgr = &skl_pll_mgr;
4165 	else if (HAS_DDI(dev_priv))
4166 		dpll_mgr = &hsw_pll_mgr;
4167 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4168 		dpll_mgr = &pch_pll_mgr;
4169 
4170 	if (!dpll_mgr) {
4171 		dev_priv->display.dpll.num_shared_dpll = 0;
4172 		return;
4173 	}
4174 
4175 	dpll_info = dpll_mgr->dpll_info;
4176 
4177 	for (i = 0; dpll_info[i].name; i++) {
4178 		if (drm_WARN_ON(&dev_priv->drm,
4179 				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4180 			break;
4181 
4182 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4183 		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4184 	}
4185 
4186 	dev_priv->display.dpll.mgr = dpll_mgr;
4187 	dev_priv->display.dpll.num_shared_dpll = i;
4188 }
4189 
4190 /**
4191  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4192  * @state: atomic state
4193  * @crtc: CRTC to compute DPLLs for
4194  * @encoder: encoder
4195  *
4196  * This function computes the DPLL state for the given CRTC and encoder.
4197  *
4198  * The new configuration in the atomic commit @state is made effective by
4199  * calling intel_shared_dpll_swap_state().
4200  *
4201  * Returns:
4202  * 0 on success, negative error code on falure.
4203  */
4204 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4205 			       struct intel_crtc *crtc,
4206 			       struct intel_encoder *encoder)
4207 {
4208 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4209 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4210 
4211 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4212 		return -EINVAL;
4213 
4214 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4215 }
4216 
4217 /**
4218  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4219  * @state: atomic state
4220  * @crtc: CRTC to reserve DPLLs for
4221  * @encoder: encoder
4222  *
4223  * This function reserves all required DPLLs for the given CRTC and encoder
4224  * combination in the current atomic commit @state and the new @crtc atomic
4225  * state.
4226  *
4227  * The new configuration in the atomic commit @state is made effective by
4228  * calling intel_shared_dpll_swap_state().
4229  *
4230  * The reserved DPLLs should be released by calling
4231  * intel_release_shared_dplls().
4232  *
4233  * Returns:
4234  * 0 if all required DPLLs were successfully reserved,
4235  * negative error code otherwise.
4236  */
4237 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4238 			       struct intel_crtc *crtc,
4239 			       struct intel_encoder *encoder)
4240 {
4241 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4242 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4243 
4244 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4245 		return -EINVAL;
4246 
4247 	return dpll_mgr->get_dplls(state, crtc, encoder);
4248 }
4249 
4250 /**
4251  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4252  * @state: atomic state
4253  * @crtc: crtc from which the DPLLs are to be released
4254  *
4255  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4256  * from the current atomic commit @state and the old @crtc atomic state.
4257  *
4258  * The new configuration in the atomic commit @state is made effective by
4259  * calling intel_shared_dpll_swap_state().
4260  */
4261 void intel_release_shared_dplls(struct intel_atomic_state *state,
4262 				struct intel_crtc *crtc)
4263 {
4264 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4265 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4266 
4267 	/*
4268 	 * FIXME: this function is called for every platform having a
4269 	 * compute_clock hook, even though the platform doesn't yet support
4270 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4271 	 * called on those.
4272 	 */
4273 	if (!dpll_mgr)
4274 		return;
4275 
4276 	dpll_mgr->put_dplls(state, crtc);
4277 }
4278 
4279 /**
4280  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4281  * @state: atomic state
4282  * @crtc: the CRTC for which to update the active DPLL
4283  * @encoder: encoder determining the type of port DPLL
4284  *
4285  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4286  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4287  * DPLL selected will be based on the current mode of the encoder's port.
4288  */
4289 void intel_update_active_dpll(struct intel_atomic_state *state,
4290 			      struct intel_crtc *crtc,
4291 			      struct intel_encoder *encoder)
4292 {
4293 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4294 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4295 
4296 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4297 		return;
4298 
4299 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4300 }
4301 
4302 /**
4303  * intel_dpll_get_freq - calculate the DPLL's output frequency
4304  * @i915: i915 device
4305  * @pll: DPLL for which to calculate the output frequency
4306  * @pll_state: DPLL state from which to calculate the output frequency
4307  *
4308  * Return the output frequency corresponding to @pll's passed in @pll_state.
4309  */
4310 int intel_dpll_get_freq(struct drm_i915_private *i915,
4311 			const struct intel_shared_dpll *pll,
4312 			const struct intel_dpll_hw_state *pll_state)
4313 {
4314 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4315 		return 0;
4316 
4317 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4318 }
4319 
4320 /**
4321  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4322  * @i915: i915 device
4323  * @pll: DPLL for which to calculate the output frequency
4324  * @hw_state: DPLL's hardware state
4325  *
4326  * Read out @pll's hardware state into @hw_state.
4327  */
4328 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4329 			     struct intel_shared_dpll *pll,
4330 			     struct intel_dpll_hw_state *hw_state)
4331 {
4332 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4333 }
4334 
/*
 * Read out @pll's current hardware state and rebuild the software
 * tracking (pipe reference/active masks) from the active crtcs that
 * reference it.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * An enabled DPLL4 on JSL/EHL must hold a DC-off wakeref, matching
	 * what combo_pll_enable() takes; it is dropped in combo_pll_disable().
	 */
	if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
	    pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4363 
/* Refresh the DPLL reference clock values, if the platform tracks them. */
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	/* update_ref_clks is optional even when a DPLL manager exists */
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
}
4369 
4370 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4371 {
4372 	int i;
4373 
4374 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4375 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4376 }
4377 
/*
 * Sanitize the software/hardware state of @pll after readout: apply the
 * ADL-P CMTG clock gating workaround to any enabled PLL, and disable a
 * PLL that is on but not referenced by any active pipe.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Applied to every enabled PLL, whether in use or not */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4396 
4397 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4398 {
4399 	int i;
4400 
4401 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4402 		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4403 }
4404 
4405 /**
4406  * intel_dpll_dump_hw_state - write hw_state to dmesg
4407  * @dev_priv: i915 drm device
4408  * @hw_state: hw state to be written to the log
4409  *
4410  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4411  */
4412 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4413 			      const struct intel_dpll_hw_state *hw_state)
4414 {
4415 	if (dev_priv->display.dpll.mgr) {
4416 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4417 	} else {
4418 		/* fallback for platforms that don't use the shared dpll
4419 		 * infrastructure
4420 		 */
4421 		drm_dbg_kms(&dev_priv->drm,
4422 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4423 			    "fp0: 0x%x, fp1: 0x%x\n",
4424 			    hw_state->dpll,
4425 			    hw_state->dpll_md,
4426 			    hw_state->fp0,
4427 			    hw_state->fp1);
4428 	}
4429 }
4430 
/*
 * Cross-check one DPLL's software tracking against its current hardware
 * state. With @crtc NULL only the PLL's global bookkeeping is verified;
 * otherwise the PLL's masks are checked against @crtc/@new_crtc_state.
 * Mismatches are reported via I915_STATE_WARN.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs have no meaningful on/active bookkeeping to check */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(dev_priv, pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		/* Global check: active pipes must be a subset of references */
		I915_STATE_WARN(dev_priv,
				pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* The pipe must appear in the active mask iff the crtc is active */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(dev_priv,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4486 
4487 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4488 				    struct intel_crtc_state *old_crtc_state,
4489 				    struct intel_crtc_state *new_crtc_state)
4490 {
4491 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4492 
4493 	if (new_crtc_state->shared_dpll)
4494 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4495 					 crtc, new_crtc_state);
4496 
4497 	if (old_crtc_state->shared_dpll &&
4498 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4499 		u8 pipe_mask = BIT(crtc->pipe);
4500 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4501 
4502 		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
4503 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4504 				pipe_name(crtc->pipe), pll->active_mask);
4505 		I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask,
4506 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4507 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4508 	}
4509 }
4510 
4511 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4512 {
4513 	int i;
4514 
4515 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4516 		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4517 					 NULL, NULL);
4518 }
4519