xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_dpll_mgr.c (revision 56ea353ea49ad21dd4c14e7baa235493ec27e766)
1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "intel_de.h"
27 #include "intel_display_types.h"
28 #include "intel_dkl_phy.h"
29 #include "intel_dkl_phy_regs.h"
30 #include "intel_dpio_phy.h"
31 #include "intel_dpll.h"
32 #include "intel_dpll_mgr.h"
33 #include "intel_mg_phy_regs.h"
34 #include "intel_pch_refclk.h"
35 #include "intel_tc.h"
36 
37 /**
38  * DOC: Display PLLs
39  *
40  * Display PLLs used for driving outputs vary by platform. While some have
41  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
42  * from a pool. In the latter scenario, it is possible that multiple pipes
43  * share a PLL if their configurations match.
44  *
45  * This file provides an abstraction over display PLLs. The function
46  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
47  * users of a PLL are tracked and that tracking is integrated with the atomic
48  * modeset interface. During an atomic operation, required PLLs can be reserved
49  * for a given CRTC and encoder configuration by calling
50  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
51  * with intel_release_shared_dplls().
52  * Changes to the users are first staged in the atomic state, and then made
53  * effective by calling intel_shared_dpll_swap_state() during the atomic
54  * commit phase.
55  */
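/*
 * Rough sketch of the flow described above (not additional driver code):
 * a platform's .compute_dplls() and .get_dplls() hooks fill
 * crtc_state->dpll_hw_state and reserve crtc_state->shared_dpll during the
 * atomic check, intel_shared_dpll_swap_state() then commits the staged
 * reservations, and intel_enable_shared_dpll()/intel_disable_shared_dpll()
 * toggle the hardware as the PLL's active pipe mask changes.
 */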
56 
57 /* platform specific hooks for managing DPLLs */
58 struct intel_shared_dpll_funcs {
59 	/*
60 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
61 	 * the pll is not already enabled.
62 	 */
63 	void (*enable)(struct drm_i915_private *i915,
64 		       struct intel_shared_dpll *pll);
65 
66 	/*
67 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
68 	 * only when it is safe to disable the pll, i.e., there are no more
69 	 * tracked users for it.
70 	 */
71 	void (*disable)(struct drm_i915_private *i915,
72 			struct intel_shared_dpll *pll);
73 
74 	/*
75 	 * Hook for reading the values currently programmed to the DPLL
76 	 * registers. This is used for initial hw state readout and state
77 	 * verification after a mode set.
78 	 */
79 	bool (*get_hw_state)(struct drm_i915_private *i915,
80 			     struct intel_shared_dpll *pll,
81 			     struct intel_dpll_hw_state *hw_state);
82 
83 	/*
84 	 * Hook for calculating the pll's output frequency based on its passed
85 	 * in state.
86 	 */
87 	int (*get_freq)(struct drm_i915_private *i915,
88 			const struct intel_shared_dpll *pll,
89 			const struct intel_dpll_hw_state *pll_state);
90 };
91 
92 struct intel_dpll_mgr {
93 	const struct dpll_info *dpll_info;
94 
95 	int (*compute_dplls)(struct intel_atomic_state *state,
96 			     struct intel_crtc *crtc,
97 			     struct intel_encoder *encoder);
98 	int (*get_dplls)(struct intel_atomic_state *state,
99 			 struct intel_crtc *crtc,
100 			 struct intel_encoder *encoder);
101 	void (*put_dplls)(struct intel_atomic_state *state,
102 			  struct intel_crtc *crtc);
103 	void (*update_active_dpll)(struct intel_atomic_state *state,
104 				   struct intel_crtc *crtc,
105 				   struct intel_encoder *encoder);
106 	void (*update_ref_clks)(struct drm_i915_private *i915);
107 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
108 			      const struct intel_dpll_hw_state *hw_state);
109 };
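/*
 * Concrete instances of these hooks live further down in this file, e.g.
 * ibx_pch_dpll_funcs/pch_pll_mgr, hsw_pll_mgr and skl_pll_mgr.
 */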
110 
111 static void
112 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
113 				  struct intel_shared_dpll_state *shared_dpll)
114 {
115 	enum intel_dpll_id i;
116 
117 	/* Copy shared dpll state */
118 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
119 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
120 
121 		shared_dpll[i] = pll->state;
122 	}
123 }
124 
125 static struct intel_shared_dpll_state *
126 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
127 {
128 	struct intel_atomic_state *state = to_intel_atomic_state(s);
129 
130 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
131 
132 	if (!state->dpll_set) {
133 		state->dpll_set = true;
134 
135 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
136 						  state->shared_dpll);
137 	}
138 
139 	return state->shared_dpll;
140 }
141 
142 /**
143  * intel_get_shared_dpll_by_id - get a DPLL given its id
144  * @dev_priv: i915 device instance
145  * @id: pll id
146  *
147  * Returns:
148  * A pointer to the DPLL with @id
149  */
150 struct intel_shared_dpll *
151 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
152 			    enum intel_dpll_id id)
153 {
154 	return &dev_priv->display.dpll.shared_dplls[id];
155 }
156 
157 /* For ILK+ */
158 void assert_shared_dpll(struct drm_i915_private *dev_priv,
159 			struct intel_shared_dpll *pll,
160 			bool state)
161 {
162 	bool cur_state;
163 	struct intel_dpll_hw_state hw_state;
164 
165 	if (drm_WARN(&dev_priv->drm, !pll,
166 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
167 		return;
168 
169 	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
170 	I915_STATE_WARN(cur_state != state,
171 	     "%s assertion failure (expected %s, current %s)\n",
172 			pll->info->name, str_on_off(state),
173 			str_on_off(cur_state));
174 }
175 
176 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
177 {
178 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
179 }
180 
181 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
182 {
183 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
184 }
185 
186 static i915_reg_t
187 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
188 			   struct intel_shared_dpll *pll)
189 {
190 	if (IS_DG1(i915))
191 		return DG1_DPLL_ENABLE(pll->info->id);
192 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
193 		return MG_PLL_ENABLE(0);
194 
195 	return ICL_DPLL_ENABLE(pll->info->id);
196 }
197 
198 static i915_reg_t
199 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
200 			struct intel_shared_dpll *pll)
201 {
202 	const enum intel_dpll_id id = pll->info->id;
203 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
204 
205 	if (IS_ALDERLAKE_P(i915))
206 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
207 
208 	return MG_PLL_ENABLE(tc_port);
209 }
210 
211 /**
212  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
213  * @crtc_state: CRTC, and its state, which has a shared DPLL
214  *
215  * Enable the shared DPLL used by the CRTC in @crtc_state.
216  */
217 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
218 {
219 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
220 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
221 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
222 	unsigned int pipe_mask = BIT(crtc->pipe);
223 	unsigned int old_mask;
224 
225 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
226 		return;
227 
228 	mutex_lock(&dev_priv->display.dpll.lock);
229 	old_mask = pll->active_mask;
230 
231 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
232 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
233 		goto out;
234 
235 	pll->active_mask |= pipe_mask;
236 
237 	drm_dbg_kms(&dev_priv->drm,
238 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
239 		    pll->info->name, pll->active_mask, pll->on,
240 		    crtc->base.base.id, crtc->base.name);
241 
242 	if (old_mask) {
243 		drm_WARN_ON(&dev_priv->drm, !pll->on);
244 		assert_shared_dpll_enabled(dev_priv, pll);
245 		goto out;
246 	}
247 	drm_WARN_ON(&dev_priv->drm, pll->on);
248 
249 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
250 	pll->info->funcs->enable(dev_priv, pll);
251 	pll->on = true;
252 
253 out:
254 	mutex_unlock(&dev_priv->display.dpll.lock);
255 }
256 
257 /**
258  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
259  * @crtc_state: CRTC, and its state, which has a shared DPLL
260  *
261  * Disable the shared DPLL used by the CRTC in @crtc_state.
262  */
263 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
264 {
265 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
266 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
267 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
268 	unsigned int pipe_mask = BIT(crtc->pipe);
269 
270 	/* PCH only available on ILK+ */
271 	if (DISPLAY_VER(dev_priv) < 5)
272 		return;
273 
274 	if (pll == NULL)
275 		return;
276 
277 	mutex_lock(&dev_priv->display.dpll.lock);
278 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
279 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
280 		     crtc->base.base.id, crtc->base.name))
281 		goto out;
282 
283 	drm_dbg_kms(&dev_priv->drm,
284 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
285 		    pll->info->name, pll->active_mask, pll->on,
286 		    crtc->base.base.id, crtc->base.name);
287 
288 	assert_shared_dpll_enabled(dev_priv, pll);
289 	drm_WARN_ON(&dev_priv->drm, !pll->on);
290 
291 	pll->active_mask &= ~pipe_mask;
292 	if (pll->active_mask)
293 		goto out;
294 
295 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
296 	pll->info->funcs->disable(dev_priv, pll);
297 	pll->on = false;
298 
299 out:
300 	mutex_unlock(&dev_priv->display.dpll.lock);
301 }
302 
303 static struct intel_shared_dpll *
304 intel_find_shared_dpll(struct intel_atomic_state *state,
305 		       const struct intel_crtc *crtc,
306 		       const struct intel_dpll_hw_state *pll_state,
307 		       unsigned long dpll_mask)
308 {
309 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
310 	struct intel_shared_dpll *pll, *unused_pll = NULL;
311 	struct intel_shared_dpll_state *shared_dpll;
312 	enum intel_dpll_id i;
313 
314 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
315 
316 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
317 
318 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
319 		pll = &dev_priv->display.dpll.shared_dplls[i];
320 
321 		/* Compare against PLLs already in use first; remember the first unused one as a fallback */
322 		if (shared_dpll[i].pipe_mask == 0) {
323 			if (!unused_pll)
324 				unused_pll = pll;
325 			continue;
326 		}
327 
328 		if (memcmp(pll_state,
329 			   &shared_dpll[i].hw_state,
330 			   sizeof(*pll_state)) == 0) {
331 			drm_dbg_kms(&dev_priv->drm,
332 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
333 				    crtc->base.base.id, crtc->base.name,
334 				    pll->info->name,
335 				    shared_dpll[i].pipe_mask,
336 				    pll->active_mask);
337 			return pll;
338 		}
339 	}
340 
341 	/* Ok no matching timings, maybe there's a free one? */
342 	if (unused_pll) {
343 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
344 			    crtc->base.base.id, crtc->base.name,
345 			    unused_pll->info->name);
346 		return unused_pll;
347 	}
348 
349 	return NULL;
350 }
351 
352 static void
353 intel_reference_shared_dpll(struct intel_atomic_state *state,
354 			    const struct intel_crtc *crtc,
355 			    const struct intel_shared_dpll *pll,
356 			    const struct intel_dpll_hw_state *pll_state)
357 {
358 	struct drm_i915_private *i915 = to_i915(state->base.dev);
359 	struct intel_shared_dpll_state *shared_dpll;
360 	const enum intel_dpll_id id = pll->info->id;
361 
362 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
363 
364 	if (shared_dpll[id].pipe_mask == 0)
365 		shared_dpll[id].hw_state = *pll_state;
366 
367 	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0);
368 
369 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
370 
371 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
372 		    crtc->base.base.id, crtc->base.name, pll->info->name);
373 }
374 
375 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
376 					  const struct intel_crtc *crtc,
377 					  const struct intel_shared_dpll *pll)
378 {
379 	struct drm_i915_private *i915 = to_i915(state->base.dev);
380 	struct intel_shared_dpll_state *shared_dpll;
381 	const enum intel_dpll_id id = pll->info->id;
382 
383 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
384 
385 	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0);
386 
387 	shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe);
388 
389 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
390 		    crtc->base.base.id, crtc->base.name, pll->info->name);
391 }
392 
393 static void intel_put_dpll(struct intel_atomic_state *state,
394 			   struct intel_crtc *crtc)
395 {
396 	const struct intel_crtc_state *old_crtc_state =
397 		intel_atomic_get_old_crtc_state(state, crtc);
398 	struct intel_crtc_state *new_crtc_state =
399 		intel_atomic_get_new_crtc_state(state, crtc);
400 
401 	new_crtc_state->shared_dpll = NULL;
402 
403 	if (!old_crtc_state->shared_dpll)
404 		return;
405 
406 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
407 }
408 
409 /**
410  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
411  * @state: atomic state
412  *
413  * This is the dpll version of drm_atomic_helper_swap_state() since the
414  * helper does not handle driver-specific global state.
415  *
416  * For consistency with atomic helpers this function does a complete swap,
417  * i.e. it also puts the current state into @state, even though there is no
418  * need for that at this moment.
419  */
420 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
421 {
422 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
423 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
424 	enum intel_dpll_id i;
425 
426 	if (!state->dpll_set)
427 		return;
428 
429 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
430 		struct intel_shared_dpll *pll =
431 			&dev_priv->display.dpll.shared_dplls[i];
432 
433 		swap(pll->state, shared_dpll[i]);
434 	}
435 }
436 
437 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
438 				      struct intel_shared_dpll *pll,
439 				      struct intel_dpll_hw_state *hw_state)
440 {
441 	const enum intel_dpll_id id = pll->info->id;
442 	intel_wakeref_t wakeref;
443 	u32 val;
444 
445 	wakeref = intel_display_power_get_if_enabled(dev_priv,
446 						     POWER_DOMAIN_DISPLAY_CORE);
447 	if (!wakeref)
448 		return false;
449 
450 	val = intel_de_read(dev_priv, PCH_DPLL(id));
451 	hw_state->dpll = val;
452 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
453 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
454 
455 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
456 
457 	return val & DPLL_VCO_ENABLE;
458 }
459 
460 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
461 {
462 	u32 val;
463 	bool enabled;
464 
465 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
466 
467 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
468 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
469 			    DREF_SUPERSPREAD_SOURCE_MASK));
470 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
471 }
472 
473 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
474 				struct intel_shared_dpll *pll)
475 {
476 	const enum intel_dpll_id id = pll->info->id;
477 
478 	/* PCH refclock must be enabled first */
479 	ibx_assert_pch_refclk_enabled(dev_priv);
480 
481 	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
482 	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
483 
484 	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
485 
486 	/* Wait for the clocks to stabilize. */
487 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
488 	udelay(150);
489 
490 	/* The pixel multiplier can only be updated once the
491 	 * DPLL is enabled and the clocks are stable.
492 	 *
493 	 * So write it again.
494 	 */
495 	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
496 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
497 	udelay(200);
498 }
499 
500 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
501 				 struct intel_shared_dpll *pll)
502 {
503 	const enum intel_dpll_id id = pll->info->id;
504 
505 	intel_de_write(dev_priv, PCH_DPLL(id), 0);
506 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
507 	udelay(200);
508 }
509 
510 static int ibx_compute_dpll(struct intel_atomic_state *state,
511 			    struct intel_crtc *crtc,
512 			    struct intel_encoder *encoder)
513 {
514 	return 0;
515 }
516 
517 static int ibx_get_dpll(struct intel_atomic_state *state,
518 			struct intel_crtc *crtc,
519 			struct intel_encoder *encoder)
520 {
521 	struct intel_crtc_state *crtc_state =
522 		intel_atomic_get_new_crtc_state(state, crtc);
523 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
524 	struct intel_shared_dpll *pll;
525 	enum intel_dpll_id i;
526 
527 	if (HAS_PCH_IBX(dev_priv)) {
528 		/* Ironlake PCH has a fixed pipe -> PCH PLL mapping. */
529 		i = (enum intel_dpll_id) crtc->pipe;
530 		pll = &dev_priv->display.dpll.shared_dplls[i];
531 
532 		drm_dbg_kms(&dev_priv->drm,
533 			    "[CRTC:%d:%s] using pre-allocated %s\n",
534 			    crtc->base.base.id, crtc->base.name,
535 			    pll->info->name);
536 	} else {
537 		pll = intel_find_shared_dpll(state, crtc,
538 					     &crtc_state->dpll_hw_state,
539 					     BIT(DPLL_ID_PCH_PLL_B) |
540 					     BIT(DPLL_ID_PCH_PLL_A));
541 	}
542 
543 	if (!pll)
544 		return -EINVAL;
545 
546 	/* reference the pll */
547 	intel_reference_shared_dpll(state, crtc,
548 				    pll, &crtc_state->dpll_hw_state);
549 
550 	crtc_state->shared_dpll = pll;
551 
552 	return 0;
553 }
554 
555 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
556 			      const struct intel_dpll_hw_state *hw_state)
557 {
558 	drm_dbg_kms(&dev_priv->drm,
559 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
560 		    "fp0: 0x%x, fp1: 0x%x\n",
561 		    hw_state->dpll,
562 		    hw_state->dpll_md,
563 		    hw_state->fp0,
564 		    hw_state->fp1);
565 }
566 
567 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
568 	.enable = ibx_pch_dpll_enable,
569 	.disable = ibx_pch_dpll_disable,
570 	.get_hw_state = ibx_pch_dpll_get_hw_state,
571 };
572 
573 static const struct dpll_info pch_plls[] = {
574 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
575 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
576 	{ },
577 };
578 
579 static const struct intel_dpll_mgr pch_pll_mgr = {
580 	.dpll_info = pch_plls,
581 	.compute_dplls = ibx_compute_dpll,
582 	.get_dplls = ibx_get_dpll,
583 	.put_dplls = intel_put_dpll,
584 	.dump_hw_state = ibx_dump_hw_state,
585 };
586 
587 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
588 				 struct intel_shared_dpll *pll)
589 {
590 	const enum intel_dpll_id id = pll->info->id;
591 
592 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
593 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
594 	udelay(20);
595 }
596 
597 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
598 				struct intel_shared_dpll *pll)
599 {
600 	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
601 	intel_de_posting_read(dev_priv, SPLL_CTL);
602 	udelay(20);
603 }
604 
605 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
606 				  struct intel_shared_dpll *pll)
607 {
608 	const enum intel_dpll_id id = pll->info->id;
609 	u32 val;
610 
611 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
612 	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
613 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
614 
615 	/*
616 	 * Try to set up the PCH reference clock once all DPLLs
617 	 * that depend on it have been shut down.
618 	 */
619 	if (dev_priv->pch_ssc_use & BIT(id))
620 		intel_init_pch_refclk(dev_priv);
621 }
622 
623 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
624 				 struct intel_shared_dpll *pll)
625 {
626 	enum intel_dpll_id id = pll->info->id;
627 	u32 val;
628 
629 	val = intel_de_read(dev_priv, SPLL_CTL);
630 	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
631 	intel_de_posting_read(dev_priv, SPLL_CTL);
632 
633 	/*
634 	 * Try to set up the PCH reference clock once all DPLLs
635 	 * that depend on it have been shut down.
636 	 */
637 	if (dev_priv->pch_ssc_use & BIT(id))
638 		intel_init_pch_refclk(dev_priv);
639 }
640 
641 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
642 				       struct intel_shared_dpll *pll,
643 				       struct intel_dpll_hw_state *hw_state)
644 {
645 	const enum intel_dpll_id id = pll->info->id;
646 	intel_wakeref_t wakeref;
647 	u32 val;
648 
649 	wakeref = intel_display_power_get_if_enabled(dev_priv,
650 						     POWER_DOMAIN_DISPLAY_CORE);
651 	if (!wakeref)
652 		return false;
653 
654 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
655 	hw_state->wrpll = val;
656 
657 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
658 
659 	return val & WRPLL_PLL_ENABLE;
660 }
661 
662 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
663 				      struct intel_shared_dpll *pll,
664 				      struct intel_dpll_hw_state *hw_state)
665 {
666 	intel_wakeref_t wakeref;
667 	u32 val;
668 
669 	wakeref = intel_display_power_get_if_enabled(dev_priv,
670 						     POWER_DOMAIN_DISPLAY_CORE);
671 	if (!wakeref)
672 		return false;
673 
674 	val = intel_de_read(dev_priv, SPLL_CTL);
675 	hw_state->spll = val;
676 
677 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
678 
679 	return val & SPLL_PLL_ENABLE;
680 }
681 
682 #define LC_FREQ 2700
683 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
684 
685 #define P_MIN 2
686 #define P_MAX 64
687 #define P_INC 2
688 
689 /* Constraints for PLL good behavior */
690 #define REF_MIN 48
691 #define REF_MAX 400
692 #define VCO_MIN 2400
693 #define VCO_MAX 4800
694 
695 struct hsw_wrpll_rnp {
696 	unsigned p, n2, r2;
697 };
698 
699 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
700 {
701 	switch (clock) {
702 	case 25175000:
703 	case 25200000:
704 	case 27000000:
705 	case 27027000:
706 	case 37762500:
707 	case 37800000:
708 	case 40500000:
709 	case 40541000:
710 	case 54000000:
711 	case 54054000:
712 	case 59341000:
713 	case 59400000:
714 	case 72000000:
715 	case 74176000:
716 	case 74250000:
717 	case 81000000:
718 	case 81081000:
719 	case 89012000:
720 	case 89100000:
721 	case 108000000:
722 	case 108108000:
723 	case 111264000:
724 	case 111375000:
725 	case 148352000:
726 	case 148500000:
727 	case 162000000:
728 	case 162162000:
729 	case 222525000:
730 	case 222750000:
731 	case 296703000:
732 	case 297000000:
733 		return 0;
734 	case 233500000:
735 	case 245250000:
736 	case 247750000:
737 	case 253250000:
738 	case 298000000:
739 		return 1500;
740 	case 169128000:
741 	case 169500000:
742 	case 179500000:
743 	case 202000000:
744 		return 2000;
745 	case 256250000:
746 	case 262500000:
747 	case 270000000:
748 	case 272500000:
749 	case 273750000:
750 	case 280750000:
751 	case 281250000:
752 	case 286000000:
753 	case 291750000:
754 		return 4000;
755 	case 267250000:
756 	case 268500000:
757 		return 5000;
758 	default:
759 		return 1000;
760 	}
761 }
762 
763 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
764 				 unsigned int r2, unsigned int n2,
765 				 unsigned int p,
766 				 struct hsw_wrpll_rnp *best)
767 {
768 	u64 a, b, c, d, diff, diff_best;
769 
770 	/* No best (r,n,p) yet */
771 	if (best->p == 0) {
772 		best->p = p;
773 		best->n2 = n2;
774 		best->r2 = r2;
775 		return;
776 	}
777 
778 	/*
779 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
780 	 * freq2k.
781 	 *
782 	 * delta = 1e6 *
783 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
784 	 *	   freq2k;
785 	 *
786 	 * and we would like delta <= budget.
787 	 *
788 	 * If the discrepancy is above the PPM-based budget, always prefer to
789 	 * improve upon the previous solution.  However, if you're within the
790 	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
791 	 */
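	/*
	 * Multiplying "delta <= budget" above by freq2k * p * r2 (all
	 * positive) gives
	 *   1e6 * abs(freq2k * p * r2 - LC_FREQ_2K * n2) <= budget * freq2k * p * r2,
	 * i.e. c <= a below, so "a < c" means this candidate misses the
	 * budget and "a >= c" means it is within it (likewise b vs d for the
	 * current best).
	 */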
792 	a = freq2k * budget * p * r2;
793 	b = freq2k * budget * best->p * best->r2;
794 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
795 	diff_best = abs_diff(freq2k * best->p * best->r2,
796 			     LC_FREQ_2K * best->n2);
797 	c = 1000000 * diff;
798 	d = 1000000 * diff_best;
799 
800 	if (a < c && b < d) {
801 		/* If both are above the budget, pick the closer */
802 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
803 			best->p = p;
804 			best->n2 = n2;
805 			best->r2 = r2;
806 		}
807 	} else if (a >= c && b < d) {
808 		/* New candidate is within the budget but the current best is not: update */
809 		best->p = p;
810 		best->n2 = n2;
811 		best->r2 = r2;
812 	} else if (a >= c && b >= d) {
813 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
814 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
815 			best->p = p;
816 			best->n2 = n2;
817 			best->r2 = r2;
818 		}
819 	}
820 	/* Otherwise a < c && b >= d, do nothing */
821 }
822 
823 static void
824 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
825 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
826 {
827 	u64 freq2k;
828 	unsigned p, n2, r2;
829 	struct hsw_wrpll_rnp best = {};
830 	unsigned budget;
831 
832 	freq2k = clock / 100;
833 
834 	budget = hsw_wrpll_get_budget_for_freq(clock);
835 
836 	/* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
837 	 * entirely and pass the 2700 MHz LC PLL output straight through. */
838 	if (freq2k == 5400000) {
839 		*n2_out = 2;
840 		*p_out = 1;
841 		*r2_out = 2;
842 		return;
843 	}
844 
845 	/*
846 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
847 	 * the WR PLL.
848 	 *
849 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
850 	 * Injecting R2 = 2 * R gives:
851 	 *   REF_MAX * r2 > LC_FREQ * 2 and
852 	 *   REF_MIN * r2 < LC_FREQ * 2
853 	 *
854 	 * Which means the desired boundaries for r2 are:
855 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
856 	 *
857 	 */
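	/*
	 * With the constants above (LC_FREQ = 2700, REF_MIN = 48,
	 * REF_MAX = 400) the loop below scans r2 = 5400/400 + 1 = 14 up to
	 * 5400/48 = 112 (integer division).
	 */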
858 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
859 	     r2 <= LC_FREQ * 2 / REF_MIN;
860 	     r2++) {
861 
862 		/*
863 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
864 		 *
865 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
866 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
867 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
868 		 *   VCO_MIN * r2 < n2 * LC_FREQ
869 		 *
870 		 * Which means the desired boundaries for n2 are:
871 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
872 		 */
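		/*
		 * E.g. for r2 = 14 this scans n2 = 2400*14/2700 + 1 = 13 up
		 * to 4800*14/2700 = 24 (integer division).
		 */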
873 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
874 		     n2 <= VCO_MAX * r2 / LC_FREQ;
875 		     n2++) {
876 
877 			for (p = P_MIN; p <= P_MAX; p += P_INC)
878 				hsw_wrpll_update_rnp(freq2k, budget,
879 						     r2, n2, p, &best);
880 		}
881 	}
882 
883 	*n2_out = best.n2;
884 	*p_out = best.p;
885 	*r2_out = best.r2;
886 }
887 
888 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
889 				  const struct intel_shared_dpll *pll,
890 				  const struct intel_dpll_hw_state *pll_state)
891 {
892 	int refclk;
893 	int n, p, r;
894 	u32 wrpll = pll_state->wrpll;
895 
896 	switch (wrpll & WRPLL_REF_MASK) {
897 	case WRPLL_REF_SPECIAL_HSW:
898 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
899 		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
900 			refclk = dev_priv->display.dpll.ref_clks.nssc;
901 			break;
902 		}
903 		fallthrough;
904 	case WRPLL_REF_PCH_SSC:
905 		/*
906 		 * We could calculate spread here, but our checking
907 		 * code only cares about 5% accuracy, and spread is a max of
908 		 * 0.5% downspread.
909 		 */
910 		refclk = dev_priv->display.dpll.ref_clks.ssc;
911 		break;
912 	case WRPLL_REF_LCPLL:
913 		refclk = 2700000;
914 		break;
915 	default:
916 		MISSING_CASE(wrpll);
917 		return 0;
918 	}
919 
920 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
921 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
922 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
923 
924 	/* Convert to KHz, p & r have a fixed point portion */
925 	return (refclk * n / 10) / (p * r) * 2;
926 }
927 
928 static int
929 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
930 			   struct intel_crtc *crtc)
931 {
932 	struct drm_i915_private *i915 = to_i915(state->base.dev);
933 	struct intel_crtc_state *crtc_state =
934 		intel_atomic_get_new_crtc_state(state, crtc);
935 	unsigned int p, n2, r2;
936 
937 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
938 
939 	crtc_state->dpll_hw_state.wrpll =
940 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
941 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
942 		WRPLL_DIVIDER_POST(p);
943 
944 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
945 							&crtc_state->dpll_hw_state);
946 
947 	return 0;
948 }
949 
950 static struct intel_shared_dpll *
951 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
952 		       struct intel_crtc *crtc)
953 {
954 	struct intel_crtc_state *crtc_state =
955 		intel_atomic_get_new_crtc_state(state, crtc);
956 
957 	return intel_find_shared_dpll(state, crtc,
958 				      &crtc_state->dpll_hw_state,
959 				      BIT(DPLL_ID_WRPLL2) |
960 				      BIT(DPLL_ID_WRPLL1));
961 }
962 
963 static int
964 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
965 {
966 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
967 	int clock = crtc_state->port_clock;
968 
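	/*
	 * port_clock / 2 of 81000, 135000 or 270000 corresponds to the DP
	 * link rates the LCPLL can drive directly: 1.62, 2.7 and 5.4 GT/s
	 * (the LCPLL 810/1350/2700 outputs below).
	 */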
969 	switch (clock / 2) {
970 	case 81000:
971 	case 135000:
972 	case 270000:
973 		return 0;
974 	default:
975 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
976 			    clock);
977 		return -EINVAL;
978 	}
979 }
980 
981 static struct intel_shared_dpll *
982 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
983 {
984 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
985 	struct intel_shared_dpll *pll;
986 	enum intel_dpll_id pll_id;
987 	int clock = crtc_state->port_clock;
988 
989 	switch (clock / 2) {
990 	case 81000:
991 		pll_id = DPLL_ID_LCPLL_810;
992 		break;
993 	case 135000:
994 		pll_id = DPLL_ID_LCPLL_1350;
995 		break;
996 	case 270000:
997 		pll_id = DPLL_ID_LCPLL_2700;
998 		break;
999 	default:
1000 		MISSING_CASE(clock / 2);
1001 		return NULL;
1002 	}
1003 
1004 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1005 
1006 	if (!pll)
1007 		return NULL;
1008 
1009 	return pll;
1010 }
1011 
1012 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1013 				  const struct intel_shared_dpll *pll,
1014 				  const struct intel_dpll_hw_state *pll_state)
1015 {
1016 	int link_clock = 0;
1017 
1018 	switch (pll->info->id) {
1019 	case DPLL_ID_LCPLL_810:
1020 		link_clock = 81000;
1021 		break;
1022 	case DPLL_ID_LCPLL_1350:
1023 		link_clock = 135000;
1024 		break;
1025 	case DPLL_ID_LCPLL_2700:
1026 		link_clock = 270000;
1027 		break;
1028 	default:
1029 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1030 		break;
1031 	}
1032 
1033 	return link_clock * 2;
1034 }
1035 
1036 static int
1037 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1038 			  struct intel_crtc *crtc)
1039 {
1040 	struct intel_crtc_state *crtc_state =
1041 		intel_atomic_get_new_crtc_state(state, crtc);
1042 
1043 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1044 		return -EINVAL;
1045 
1046 	crtc_state->dpll_hw_state.spll =
1047 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1048 
1049 	return 0;
1050 }
1051 
1052 static struct intel_shared_dpll *
1053 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1054 		      struct intel_crtc *crtc)
1055 {
1056 	struct intel_crtc_state *crtc_state =
1057 		intel_atomic_get_new_crtc_state(state, crtc);
1058 
1059 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1060 				      BIT(DPLL_ID_SPLL));
1061 }
1062 
1063 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1064 				 const struct intel_shared_dpll *pll,
1065 				 const struct intel_dpll_hw_state *pll_state)
1066 {
1067 	int link_clock = 0;
1068 
1069 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1070 	case SPLL_FREQ_810MHz:
1071 		link_clock = 81000;
1072 		break;
1073 	case SPLL_FREQ_1350MHz:
1074 		link_clock = 135000;
1075 		break;
1076 	case SPLL_FREQ_2700MHz:
1077 		link_clock = 270000;
1078 		break;
1079 	default:
1080 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1081 		break;
1082 	}
1083 
1084 	return link_clock * 2;
1085 }
1086 
1087 static int hsw_compute_dpll(struct intel_atomic_state *state,
1088 			    struct intel_crtc *crtc,
1089 			    struct intel_encoder *encoder)
1090 {
1091 	struct intel_crtc_state *crtc_state =
1092 		intel_atomic_get_new_crtc_state(state, crtc);
1093 
1094 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1095 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1096 	else if (intel_crtc_has_dp_encoder(crtc_state))
1097 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1098 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1099 		return hsw_ddi_spll_compute_dpll(state, crtc);
1100 	else
1101 		return -EINVAL;
1102 }
1103 
1104 static int hsw_get_dpll(struct intel_atomic_state *state,
1105 			struct intel_crtc *crtc,
1106 			struct intel_encoder *encoder)
1107 {
1108 	struct intel_crtc_state *crtc_state =
1109 		intel_atomic_get_new_crtc_state(state, crtc);
1110 	struct intel_shared_dpll *pll = NULL;
1111 
1112 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1113 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1114 	else if (intel_crtc_has_dp_encoder(crtc_state))
1115 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1116 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1117 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1118 
1119 	if (!pll)
1120 		return -EINVAL;
1121 
1122 	intel_reference_shared_dpll(state, crtc,
1123 				    pll, &crtc_state->dpll_hw_state);
1124 
1125 	crtc_state->shared_dpll = pll;
1126 
1127 	return 0;
1128 }
1129 
1130 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1131 {
1132 	i915->display.dpll.ref_clks.ssc = 135000;
1133 	/* Non-SSC is only used on non-ULT HSW. */
1134 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1135 		i915->display.dpll.ref_clks.nssc = 24000;
1136 	else
1137 		i915->display.dpll.ref_clks.nssc = 135000;
1138 }
1139 
1140 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1141 			      const struct intel_dpll_hw_state *hw_state)
1142 {
1143 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1144 		    hw_state->wrpll, hw_state->spll);
1145 }
1146 
1147 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1148 	.enable = hsw_ddi_wrpll_enable,
1149 	.disable = hsw_ddi_wrpll_disable,
1150 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1151 	.get_freq = hsw_ddi_wrpll_get_freq,
1152 };
1153 
1154 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1155 	.enable = hsw_ddi_spll_enable,
1156 	.disable = hsw_ddi_spll_disable,
1157 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1158 	.get_freq = hsw_ddi_spll_get_freq,
1159 };
1160 
1161 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1162 				 struct intel_shared_dpll *pll)
1163 {
1164 }
1165 
1166 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1167 				  struct intel_shared_dpll *pll)
1168 {
1169 }
1170 
1171 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1172 				       struct intel_shared_dpll *pll,
1173 				       struct intel_dpll_hw_state *hw_state)
1174 {
1175 	return true;
1176 }
1177 
1178 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1179 	.enable = hsw_ddi_lcpll_enable,
1180 	.disable = hsw_ddi_lcpll_disable,
1181 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1182 	.get_freq = hsw_ddi_lcpll_get_freq,
1183 };
1184 
1185 static const struct dpll_info hsw_plls[] = {
1186 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1187 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1188 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1189 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1190 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1191 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1192 	{ },
1193 };
1194 
1195 static const struct intel_dpll_mgr hsw_pll_mgr = {
1196 	.dpll_info = hsw_plls,
1197 	.compute_dplls = hsw_compute_dpll,
1198 	.get_dplls = hsw_get_dpll,
1199 	.put_dplls = intel_put_dpll,
1200 	.update_ref_clks = hsw_update_dpll_ref_clks,
1201 	.dump_hw_state = hsw_dump_hw_state,
1202 };
1203 
1204 struct skl_dpll_regs {
1205 	i915_reg_t ctl, cfgcr1, cfgcr2;
1206 };
1207 
1208 /* this array is indexed by the *shared* pll id */
1209 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1210 	{
1211 		/* DPLL 0 */
1212 		.ctl = LCPLL1_CTL,
1213 		/* DPLL 0 doesn't support HDMI mode */
1214 	},
1215 	{
1216 		/* DPLL 1 */
1217 		.ctl = LCPLL2_CTL,
1218 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1219 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1220 	},
1221 	{
1222 		/* DPLL 2 */
1223 		.ctl = WRPLL_CTL(0),
1224 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1225 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1226 	},
1227 	{
1228 		/* DPLL 3 */
1229 		.ctl = WRPLL_CTL(1),
1230 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1231 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1232 	},
1233 };
1234 
1235 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1236 				    struct intel_shared_dpll *pll)
1237 {
1238 	const enum intel_dpll_id id = pll->info->id;
1239 	u32 val;
1240 
1241 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1242 
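	/* each PLL owns a 6-bit field in DPLL_CTRL1, hence the id * 6 shift */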
1243 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1244 		 DPLL_CTRL1_SSC(id) |
1245 		 DPLL_CTRL1_LINK_RATE_MASK(id));
1246 	val |= pll->state.hw_state.ctrl1 << (id * 6);
1247 
1248 	intel_de_write(dev_priv, DPLL_CTRL1, val);
1249 	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1250 }
1251 
1252 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1253 			       struct intel_shared_dpll *pll)
1254 {
1255 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1256 	const enum intel_dpll_id id = pll->info->id;
1257 
1258 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1259 
1260 	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1261 	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1262 	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1263 	intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1264 
1265 	/* the enable bit is always bit 31 */
1266 	intel_de_write(dev_priv, regs[id].ctl,
1267 		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
1268 
1269 	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1270 		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1271 }
1272 
1273 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1274 				 struct intel_shared_dpll *pll)
1275 {
1276 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1277 }
1278 
1279 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1280 				struct intel_shared_dpll *pll)
1281 {
1282 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1283 	const enum intel_dpll_id id = pll->info->id;
1284 
1285 	/* the enable bit is always bit 31 */
1286 	intel_de_write(dev_priv, regs[id].ctl,
1287 		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1288 	intel_de_posting_read(dev_priv, regs[id].ctl);
1289 }
1290 
1291 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1292 				  struct intel_shared_dpll *pll)
1293 {
1294 }
1295 
1296 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1297 				     struct intel_shared_dpll *pll,
1298 				     struct intel_dpll_hw_state *hw_state)
1299 {
1300 	u32 val;
1301 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1302 	const enum intel_dpll_id id = pll->info->id;
1303 	intel_wakeref_t wakeref;
1304 	bool ret;
1305 
1306 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1307 						     POWER_DOMAIN_DISPLAY_CORE);
1308 	if (!wakeref)
1309 		return false;
1310 
1311 	ret = false;
1312 
1313 	val = intel_de_read(dev_priv, regs[id].ctl);
1314 	if (!(val & LCPLL_PLL_ENABLE))
1315 		goto out;
1316 
1317 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1318 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1319 
1320 	/* avoid reading back stale values if HDMI mode is not enabled */
1321 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1322 		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1323 		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1324 	}
1325 	ret = true;
1326 
1327 out:
1328 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1329 
1330 	return ret;
1331 }
1332 
1333 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1334 				       struct intel_shared_dpll *pll,
1335 				       struct intel_dpll_hw_state *hw_state)
1336 {
1337 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1338 	const enum intel_dpll_id id = pll->info->id;
1339 	intel_wakeref_t wakeref;
1340 	u32 val;
1341 	bool ret;
1342 
1343 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1344 						     POWER_DOMAIN_DISPLAY_CORE);
1345 	if (!wakeref)
1346 		return false;
1347 
1348 	ret = false;
1349 
1350 	/* DPLL0 is always enabled since it drives CDCLK */
1351 	val = intel_de_read(dev_priv, regs[id].ctl);
1352 	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1353 		goto out;
1354 
1355 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1356 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1357 
1358 	ret = true;
1359 
1360 out:
1361 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1362 
1363 	return ret;
1364 }
1365 
1366 struct skl_wrpll_context {
1367 	u64 min_deviation;		/* current minimal deviation */
1368 	u64 central_freq;		/* chosen central freq */
1369 	u64 dco_freq;			/* chosen dco freq */
1370 	unsigned int p;			/* chosen divider */
1371 };
1372 
1373 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1374 #define SKL_DCO_MAX_PDEVIATION	100
1375 #define SKL_DCO_MAX_NDEVIATION	600
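/* the deviation below is computed in 0.01% units, so 100 = 1%, 600 = 6% */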
1376 
1377 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1378 				  u64 central_freq,
1379 				  u64 dco_freq,
1380 				  unsigned int divider)
1381 {
1382 	u64 deviation;
1383 
1384 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1385 			      central_freq);
1386 
1387 	/* positive deviation */
1388 	if (dco_freq >= central_freq) {
1389 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1390 		    deviation < ctx->min_deviation) {
1391 			ctx->min_deviation = deviation;
1392 			ctx->central_freq = central_freq;
1393 			ctx->dco_freq = dco_freq;
1394 			ctx->p = divider;
1395 		}
1396 	/* negative deviation */
1397 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1398 		   deviation < ctx->min_deviation) {
1399 		ctx->min_deviation = deviation;
1400 		ctx->central_freq = central_freq;
1401 		ctx->dco_freq = dco_freq;
1402 		ctx->p = divider;
1403 	}
1404 }
1405 
1406 static void skl_wrpll_get_multipliers(unsigned int p,
1407 				      unsigned int *p0 /* out */,
1408 				      unsigned int *p1 /* out */,
1409 				      unsigned int *p2 /* out */)
1410 {
1411 	/* even dividers */
1412 	if (p % 2 == 0) {
1413 		unsigned int half = p / 2;
1414 
1415 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1416 			*p0 = 2;
1417 			*p1 = 1;
1418 			*p2 = half;
1419 		} else if (half % 2 == 0) {
1420 			*p0 = 2;
1421 			*p1 = half / 2;
1422 			*p2 = 2;
1423 		} else if (half % 3 == 0) {
1424 			*p0 = 3;
1425 			*p1 = half / 3;
1426 			*p2 = 2;
1427 		} else if (half % 7 == 0) {
1428 			*p0 = 7;
1429 			*p1 = half / 7;
1430 			*p2 = 2;
1431 		}
1432 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1433 		*p0 = 3;
1434 		*p1 = 1;
1435 		*p2 = p / 3;
1436 	} else if (p == 5 || p == 7) {
1437 		*p0 = p;
1438 		*p1 = 1;
1439 		*p2 = 1;
1440 	} else if (p == 15) {
1441 		*p0 = 3;
1442 		*p1 = 1;
1443 		*p2 = 5;
1444 	} else if (p == 21) {
1445 		*p0 = 7;
1446 		*p1 = 1;
1447 		*p2 = 3;
1448 	} else if (p == 35) {
1449 		*p0 = 7;
1450 		*p1 = 1;
1451 		*p2 = 5;
1452 	}
1453 }
1454 
1455 struct skl_wrpll_params {
1456 	u32 dco_fraction;
1457 	u32 dco_integer;
1458 	u32 qdiv_ratio;
1459 	u32 qdiv_mode;
1460 	u32 kdiv;
1461 	u32 pdiv;
1462 	u32 central_freq;
1463 };
1464 
1465 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1466 				      u64 afe_clock,
1467 				      int ref_clock,
1468 				      u64 central_freq,
1469 				      u32 p0, u32 p1, u32 p2)
1470 {
1471 	u64 dco_freq;
1472 
1473 	switch (central_freq) {
1474 	case 9600000000ULL:
1475 		params->central_freq = 0;
1476 		break;
1477 	case 9000000000ULL:
1478 		params->central_freq = 1;
1479 		break;
1480 	case 8400000000ULL:
1481 		params->central_freq = 3;
1482 	}
1483 
1484 	switch (p0) {
1485 	case 1:
1486 		params->pdiv = 0;
1487 		break;
1488 	case 2:
1489 		params->pdiv = 1;
1490 		break;
1491 	case 3:
1492 		params->pdiv = 2;
1493 		break;
1494 	case 7:
1495 		params->pdiv = 4;
1496 		break;
1497 	default:
1498 		WARN(1, "Incorrect PDiv\n");
1499 	}
1500 
1501 	switch (p2) {
1502 	case 5:
1503 		params->kdiv = 0;
1504 		break;
1505 	case 2:
1506 		params->kdiv = 1;
1507 		break;
1508 	case 3:
1509 		params->kdiv = 2;
1510 		break;
1511 	case 1:
1512 		params->kdiv = 3;
1513 		break;
1514 	default:
1515 		WARN(1, "Incorrect KDiv\n");
1516 	}
1517 
1518 	params->qdiv_ratio = p1;
1519 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1520 
1521 	dco_freq = p0 * p1 * p2 * afe_clock;
1522 
1523 	/*
1524 	 * Intermediate values are in Hz.
1525 	 * Divide by MHz to match bspec
1526 	 */
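	/*
	 * Worked example, assuming the typical 24 MHz (24000 kHz) reference:
	 * dco_freq = 8.91 GHz gives dco_integer = 371 and dco_fraction =
	 * (371250000 - 371000000) * 0x8000 / 1000000 = 0x2000.
	 */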
1527 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1528 	params->dco_fraction =
1529 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1530 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1531 }
1532 
1533 static int
1534 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1535 			int ref_clock,
1536 			struct skl_wrpll_params *wrpll_params)
1537 {
1538 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1539 						 9000000000ULL,
1540 						 9600000000ULL };
1541 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1542 					    24, 28, 30, 32, 36, 40, 42, 44,
1543 					    48, 52, 54, 56, 60, 64, 66, 68,
1544 					    70, 72, 76, 78, 80, 84, 88, 90,
1545 					    92, 96, 98 };
1546 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1547 	static const struct {
1548 		const u8 *list;
1549 		int n_dividers;
1550 	} dividers[] = {
1551 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1552 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1553 	};
1554 	struct skl_wrpll_context ctx = {
1555 		.min_deviation = U64_MAX,
1556 	};
1557 	unsigned int dco, d, i;
1558 	unsigned int p0, p1, p2;
1559 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1560 
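	/*
	 * Worked example (a sketch, not normative): a 148.5 MHz HDMI clock
	 * gives afe_clock = 742.5 MHz; the even divider p = 12 puts the DCO
	 * at 8.91 GHz, 1% below the 9.0 GHz central frequency (deviation 100,
	 * within SKL_DCO_MAX_NDEVIATION), and skl_wrpll_get_multipliers()
	 * splits it into p0 = 2, p1 = 3, p2 = 2.
	 */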
1561 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1562 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1563 			for (i = 0; i < dividers[d].n_dividers; i++) {
1564 				unsigned int p = dividers[d].list[i];
1565 				u64 dco_freq = p * afe_clock;
1566 
1567 				skl_wrpll_try_divider(&ctx,
1568 						      dco_central_freq[dco],
1569 						      dco_freq,
1570 						      p);
1571 				/*
1572 				 * Skip the remaining dividers if we're sure to
1573 				 * have found the definitive divider; we can't
1574 				 * improve on a 0 deviation.
1575 				 */
1576 				if (ctx.min_deviation == 0)
1577 					goto skip_remaining_dividers;
1578 			}
1579 		}
1580 
1581 skip_remaining_dividers:
1582 		/*
1583 		 * If a solution is found with an even divider, prefer
1584 		 * this one.
1585 		 */
1586 		if (d == 0 && ctx.p)
1587 			break;
1588 	}
1589 
1590 	if (!ctx.p)
1591 		return -EINVAL;
1592 
1593 	/*
1594 	 * gcc incorrectly analyses that these can be used without being
1595 	 * initialized. To be fair, it's hard to guess.
1596 	 */
1597 	p0 = p1 = p2 = 0;
1598 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1599 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1600 				  ctx.central_freq, p0, p1, p2);
1601 
1602 	return 0;
1603 }
1604 
1605 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1606 				  const struct intel_shared_dpll *pll,
1607 				  const struct intel_dpll_hw_state *pll_state)
1608 {
1609 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1610 	u32 p0, p1, p2, dco_freq;
1611 
1612 	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1613 	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1614 
1615 	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
1616 		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1617 	else
1618 		p1 = 1;
1619 
1620 
1621 	switch (p0) {
1622 	case DPLL_CFGCR2_PDIV_1:
1623 		p0 = 1;
1624 		break;
1625 	case DPLL_CFGCR2_PDIV_2:
1626 		p0 = 2;
1627 		break;
1628 	case DPLL_CFGCR2_PDIV_3:
1629 		p0 = 3;
1630 		break;
1631 	case DPLL_CFGCR2_PDIV_7_INVALID:
1632 		/*
1633 		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
1634 		 * handling it the same way as PDIV_7.
1635 		 */
1636 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1637 		fallthrough;
1638 	case DPLL_CFGCR2_PDIV_7:
1639 		p0 = 7;
1640 		break;
1641 	default:
1642 		MISSING_CASE(p0);
1643 		return 0;
1644 	}
1645 
1646 	switch (p2) {
1647 	case DPLL_CFGCR2_KDIV_5:
1648 		p2 = 5;
1649 		break;
1650 	case DPLL_CFGCR2_KDIV_2:
1651 		p2 = 2;
1652 		break;
1653 	case DPLL_CFGCR2_KDIV_3:
1654 		p2 = 3;
1655 		break;
1656 	case DPLL_CFGCR2_KDIV_1:
1657 		p2 = 1;
1658 		break;
1659 	default:
1660 		MISSING_CASE(p2);
1661 		return 0;
1662 	}
1663 
1664 	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1665 		   ref_clock;
1666 
1667 	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1668 		    ref_clock / 0x8000;
1669 
1670 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1671 		return 0;
1672 
1673 	return dco_freq / (p0 * p1 * p2 * 5);
1674 }
1675 
1676 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1677 {
1678 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1679 	struct skl_wrpll_params wrpll_params = {};
1680 	u32 ctrl1, cfgcr1, cfgcr2;
1681 	int ret;
1682 
1683 	/*
1684 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1685 	 * as the DPLL id in this function.
1686 	 */
1687 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1688 
1689 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1690 
1691 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1692 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1693 	if (ret)
1694 		return ret;
1695 
1696 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1697 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1698 		wrpll_params.dco_integer;
1699 
1700 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1701 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1702 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1703 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1704 		wrpll_params.central_freq;
1705 
1706 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1707 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1708 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1709 
1710 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1711 							&crtc_state->dpll_hw_state);
1712 
1713 	return 0;
1714 }
1715 
1716 static int
1717 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1718 {
1719 	u32 ctrl1;
1720 
1721 	/*
1722 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1723 	 * as the DPLL id in this function.
1724 	 */
1725 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1726 	switch (crtc_state->port_clock / 2) {
1727 	case 81000:
1728 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1729 		break;
1730 	case 135000:
1731 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1732 		break;
1733 	case 270000:
1734 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1735 		break;
1736 		/* eDP 1.4 rates */
1737 	case 162000:
1738 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1739 		break;
1740 	case 108000:
1741 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1742 		break;
1743 	case 216000:
1744 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1745 		break;
1746 	}
1747 
1748 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1749 
1750 	return 0;
1751 }
1752 
1753 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1754 				  const struct intel_shared_dpll *pll,
1755 				  const struct intel_dpll_hw_state *pll_state)
1756 {
1757 	int link_clock = 0;
1758 
1759 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1760 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1761 	case DPLL_CTRL1_LINK_RATE_810:
1762 		link_clock = 81000;
1763 		break;
1764 	case DPLL_CTRL1_LINK_RATE_1080:
1765 		link_clock = 108000;
1766 		break;
1767 	case DPLL_CTRL1_LINK_RATE_1350:
1768 		link_clock = 135000;
1769 		break;
1770 	case DPLL_CTRL1_LINK_RATE_1620:
1771 		link_clock = 162000;
1772 		break;
1773 	case DPLL_CTRL1_LINK_RATE_2160:
1774 		link_clock = 216000;
1775 		break;
1776 	case DPLL_CTRL1_LINK_RATE_2700:
1777 		link_clock = 270000;
1778 		break;
1779 	default:
1780 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1781 		break;
1782 	}
1783 
1784 	return link_clock * 2;
1785 }
1786 
1787 static int skl_compute_dpll(struct intel_atomic_state *state,
1788 			    struct intel_crtc *crtc,
1789 			    struct intel_encoder *encoder)
1790 {
1791 	struct intel_crtc_state *crtc_state =
1792 		intel_atomic_get_new_crtc_state(state, crtc);
1793 
1794 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1795 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1796 	else if (intel_crtc_has_dp_encoder(crtc_state))
1797 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1798 	else
1799 		return -EINVAL;
1800 }
1801 
1802 static int skl_get_dpll(struct intel_atomic_state *state,
1803 			struct intel_crtc *crtc,
1804 			struct intel_encoder *encoder)
1805 {
1806 	struct intel_crtc_state *crtc_state =
1807 		intel_atomic_get_new_crtc_state(state, crtc);
1808 	struct intel_shared_dpll *pll;
1809 
1810 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1811 		pll = intel_find_shared_dpll(state, crtc,
1812 					     &crtc_state->dpll_hw_state,
1813 					     BIT(DPLL_ID_SKL_DPLL0));
1814 	else
1815 		pll = intel_find_shared_dpll(state, crtc,
1816 					     &crtc_state->dpll_hw_state,
1817 					     BIT(DPLL_ID_SKL_DPLL3) |
1818 					     BIT(DPLL_ID_SKL_DPLL2) |
1819 					     BIT(DPLL_ID_SKL_DPLL1));
1820 	if (!pll)
1821 		return -EINVAL;
1822 
1823 	intel_reference_shared_dpll(state, crtc,
1824 				    pll, &crtc_state->dpll_hw_state);
1825 
1826 	crtc_state->shared_dpll = pll;
1827 
1828 	return 0;
1829 }
1830 
1831 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1832 				const struct intel_shared_dpll *pll,
1833 				const struct intel_dpll_hw_state *pll_state)
1834 {
1835 	/*
1836 	 * the ctrl1 value is already shifted down to the given PLL's field,
1837 	 * so use DPLL id 0 to get the shifts within that field
1838 	 */
1839 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1840 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1841 	else
1842 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1843 }
1844 
1845 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1846 {
1847 	/* No SSC ref */
1848 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1849 }
1850 
1851 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1852 			      const struct intel_dpll_hw_state *hw_state)
1853 {
1854 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1855 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1856 		      hw_state->ctrl1,
1857 		      hw_state->cfgcr1,
1858 		      hw_state->cfgcr2);
1859 }
1860 
1861 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1862 	.enable = skl_ddi_pll_enable,
1863 	.disable = skl_ddi_pll_disable,
1864 	.get_hw_state = skl_ddi_pll_get_hw_state,
1865 	.get_freq = skl_ddi_pll_get_freq,
1866 };
1867 
1868 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1869 	.enable = skl_ddi_dpll0_enable,
1870 	.disable = skl_ddi_dpll0_disable,
1871 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1872 	.get_freq = skl_ddi_pll_get_freq,
1873 };
1874 
1875 static const struct dpll_info skl_plls[] = {
1876 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1877 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1878 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1879 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1880 	{ },
1881 };
1882 
1883 static const struct intel_dpll_mgr skl_pll_mgr = {
1884 	.dpll_info = skl_plls,
1885 	.compute_dplls = skl_compute_dpll,
1886 	.get_dplls = skl_get_dpll,
1887 	.put_dplls = intel_put_dpll,
1888 	.update_ref_clks = skl_update_dpll_ref_clks,
1889 	.dump_hw_state = skl_dump_hw_state,
1890 };
1891 
1892 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1893 				struct intel_shared_dpll *pll)
1894 {
1895 	u32 temp;
1896 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1897 	enum dpio_phy phy;
1898 	enum dpio_channel ch;
1899 
1900 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1901 
1902 	/* Non-SSC reference */
1903 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1904 	temp |= PORT_PLL_REF_SEL;
1905 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1906 
1907 	if (IS_GEMINILAKE(dev_priv)) {
1908 		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1909 		temp |= PORT_PLL_POWER_ENABLE;
1910 		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1911 
1912 		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1913 				 PORT_PLL_POWER_STATE), 200))
1914 			drm_err(&dev_priv->drm,
1915 				"Power state not set for PLL:%d\n", port);
1916 	}
1917 
1918 	/* Disable 10 bit clock */
1919 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1920 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1921 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1922 
1923 	/* Write P1 & P2 */
1924 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1925 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1926 	temp |= pll->state.hw_state.ebb0;
1927 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1928 
1929 	/* Write M2 integer */
1930 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1931 	temp &= ~PORT_PLL_M2_INT_MASK;
1932 	temp |= pll->state.hw_state.pll0;
1933 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
1934 
1935 	/* Write N */
1936 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1937 	temp &= ~PORT_PLL_N_MASK;
1938 	temp |= pll->state.hw_state.pll1;
1939 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1940 
1941 	/* Write M2 fraction */
1942 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1943 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1944 	temp |= pll->state.hw_state.pll2;
1945 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1946 
1947 	/* Write M2 fraction enable */
1948 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1949 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1950 	temp |= pll->state.hw_state.pll3;
1951 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
1952 
1953 	/* Write coeff */
1954 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1955 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1956 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1957 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1958 	temp |= pll->state.hw_state.pll6;
1959 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1960 
1961 	/* Write calibration val */
1962 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1963 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1964 	temp |= pll->state.hw_state.pll8;
1965 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
1966 
1967 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1968 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1969 	temp |= pll->state.hw_state.pll9;
1970 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
1971 
1972 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1973 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1974 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1975 	temp |= pll->state.hw_state.pll10;
1976 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1977 
1978 	/* Recalibrate with new settings */
1979 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1980 	temp |= PORT_PLL_RECALIBRATE;
1981 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1982 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1983 	temp |= pll->state.hw_state.ebb4;
1984 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1985 
1986 	/* Enable PLL */
1987 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1988 	temp |= PORT_PLL_ENABLE;
1989 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1990 	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1991 
1992 	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1993 			200))
1994 		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
1995 
1996 	if (IS_GEMINILAKE(dev_priv)) {
1997 		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
1998 		temp |= DCC_DELAY_RANGE_2;
1999 		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2000 	}
2001 
2002 	/*
2003 	 * While we write to the group register to program all lanes at once,
2004 	 * we can only read back individual lane registers; pick lanes 0/1 for that.
2005 	 */
2006 	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
2007 	temp &= ~LANE_STAGGER_MASK;
2008 	temp &= ~LANESTAGGER_STRAP_OVRD;
2009 	temp |= pll->state.hw_state.pcsdw12;
2010 	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2011 }
2012 
2013 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
2014 					struct intel_shared_dpll *pll)
2015 {
2016 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2017 	u32 temp;
2018 
2019 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2020 	temp &= ~PORT_PLL_ENABLE;
2021 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
2022 	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2023 
2024 	if (IS_GEMINILAKE(dev_priv)) {
2025 		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2026 		temp &= ~PORT_PLL_POWER_ENABLE;
2027 		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
2028 
2029 		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
2030 				  PORT_PLL_POWER_STATE), 200))
2031 			drm_err(&dev_priv->drm,
2032 				"Power state not reset for PLL:%d\n", port);
2033 	}
2034 }
2035 
2036 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2037 					struct intel_shared_dpll *pll,
2038 					struct intel_dpll_hw_state *hw_state)
2039 {
2040 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2041 	intel_wakeref_t wakeref;
2042 	enum dpio_phy phy;
2043 	enum dpio_channel ch;
2044 	u32 val;
2045 	bool ret;
2046 
2047 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
2048 
2049 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2050 						     POWER_DOMAIN_DISPLAY_CORE);
2051 	if (!wakeref)
2052 		return false;
2053 
2054 	ret = false;
2055 
2056 	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2057 	if (!(val & PORT_PLL_ENABLE))
2058 		goto out;
2059 
2060 	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
2061 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2062 
2063 	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
2064 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2065 
2066 	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
2067 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2068 
2069 	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
2070 	hw_state->pll1 &= PORT_PLL_N_MASK;
2071 
2072 	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
2073 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2074 
2075 	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
2076 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2077 
2078 	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
2079 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2080 			  PORT_PLL_INT_COEFF_MASK |
2081 			  PORT_PLL_GAIN_CTL_MASK;
2082 
2083 	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
2084 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2085 
2086 	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2087 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2088 
2089 	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2090 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2091 			   PORT_PLL_DCO_AMP_MASK;
2092 
2093 	/*
2094 	 * While we write to the group register to program all lanes at once, we
2095 	 * can only read back individual lane registers. We configure all lanes
2096 	 * the same way, so just read out lanes 0/1 and print a note if lanes 2/3 differ.
2097 	 */
2098 	hw_state->pcsdw12 = intel_de_read(dev_priv,
2099 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2100 	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2101 		drm_dbg(&dev_priv->drm,
2102 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2103 			hw_state->pcsdw12,
2104 			intel_de_read(dev_priv,
2105 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2106 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2107 
2108 	ret = true;
2109 
2110 out:
2111 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2112 
2113 	return ret;
2114 }
2115 
2116 /* pre-calculated values for DP linkrates */
2117 static const struct dpll bxt_dp_clk_val[] = {
2118 	/* m2 is .22 binary fixed point */
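	/* e.g. 0x6c00000 == 27 << 22 == 27.0 and 0x819999a == ~32.4 * 2^22 */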
2119 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2120 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2121 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2122 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2123 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2124 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2125 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2126 };
2127 
2128 static int
2129 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2130 			  struct dpll *clk_div)
2131 {
2132 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2133 
2134 	/* Calculate HDMI div */
2135 	/*
2136 	 * FIXME: tie the following calculation into
2137 	 * i9xx_crtc_compute_clock
2138 	 */
2139 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2140 		return -EINVAL;
2141 
2142 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2143 
2144 	return 0;
2145 }
2146 
2147 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2148 				    struct dpll *clk_div)
2149 {
2150 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2151 	int i;
2152 
2153 	*clk_div = bxt_dp_clk_val[0];
2154 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2155 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2156 			*clk_div = bxt_dp_clk_val[i];
2157 			break;
2158 		}
2159 	}
2160 
2161 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2162 
2163 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2164 		    clk_div->dot != crtc_state->port_clock);
2165 }
2166 
2167 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2168 				     const struct dpll *clk_div)
2169 {
2170 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2171 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2172 	int clock = crtc_state->port_clock;
2173 	int vco = clk_div->vco;
2174 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2175 	u32 lanestagger;
2176 
2177 	if (vco >= 6200000 && vco <= 6700000) {
2178 		prop_coef = 4;
2179 		int_coef = 9;
2180 		gain_ctl = 3;
2181 		targ_cnt = 8;
2182 	} else if ((vco > 5400000 && vco < 6200000) ||
2183 			(vco >= 4800000 && vco < 5400000)) {
2184 		prop_coef = 5;
2185 		int_coef = 11;
2186 		gain_ctl = 3;
2187 		targ_cnt = 9;
2188 	} else if (vco == 5400000) {
2189 		prop_coef = 3;
2190 		int_coef = 8;
2191 		gain_ctl = 1;
2192 		targ_cnt = 9;
2193 	} else {
2194 		drm_err(&i915->drm, "Invalid VCO\n");
2195 		return -EINVAL;
2196 	}
2197 
2198 	if (clock > 270000)
2199 		lanestagger = 0x18;
2200 	else if (clock > 135000)
2201 		lanestagger = 0x0d;
2202 	else if (clock > 67000)
2203 		lanestagger = 0x07;
2204 	else if (clock > 33000)
2205 		lanestagger = 0x04;
2206 	else
2207 		lanestagger = 0x02;
2208 
2209 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2210 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2211 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2212 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2213 
2214 	if (clk_div->m2 & 0x3fffff)
2215 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2216 
2217 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2218 		PORT_PLL_INT_COEFF(int_coef) |
2219 		PORT_PLL_GAIN_CTL(gain_ctl);
2220 
2221 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2222 
2223 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2224 
2225 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2226 		PORT_PLL_DCO_AMP_OVR_EN_H;
2227 
2228 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2229 
2230 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2231 
2232 	return 0;
2233 }
2234 
2235 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2236 				const struct intel_shared_dpll *pll,
2237 				const struct intel_dpll_hw_state *pll_state)
2238 {
2239 	struct dpll clock;
2240 
2241 	clock.m1 = 2;
2242 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2243 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2244 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2245 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2246 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2247 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2248 
2249 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2250 }
2251 
2252 static int
2253 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2254 {
2255 	struct dpll clk_div = {};
2256 
2257 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2258 
2259 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2260 }
2261 
2262 static int
2263 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2264 {
2265 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2266 	struct dpll clk_div = {};
2267 	int ret;
2268 
2269 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2270 
2271 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2272 	if (ret)
2273 		return ret;
2274 
2275 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2276 						      &crtc_state->dpll_hw_state);
2277 
2278 	return 0;
2279 }
2280 
2281 static int bxt_compute_dpll(struct intel_atomic_state *state,
2282 			    struct intel_crtc *crtc,
2283 			    struct intel_encoder *encoder)
2284 {
2285 	struct intel_crtc_state *crtc_state =
2286 		intel_atomic_get_new_crtc_state(state, crtc);
2287 
2288 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2289 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2290 	else if (intel_crtc_has_dp_encoder(crtc_state))
2291 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2292 	else
2293 		return -EINVAL;
2294 }
2295 
2296 static int bxt_get_dpll(struct intel_atomic_state *state,
2297 			struct intel_crtc *crtc,
2298 			struct intel_encoder *encoder)
2299 {
2300 	struct intel_crtc_state *crtc_state =
2301 		intel_atomic_get_new_crtc_state(state, crtc);
2302 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2303 	struct intel_shared_dpll *pll;
2304 	enum intel_dpll_id id;
2305 
2306 	/* 1:1 mapping between ports and PLLs */
2307 	id = (enum intel_dpll_id) encoder->port;
2308 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2309 
2310 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2311 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2312 
2313 	intel_reference_shared_dpll(state, crtc,
2314 				    pll, &crtc_state->dpll_hw_state);
2315 
2316 	crtc_state->shared_dpll = pll;
2317 
2318 	return 0;
2319 }
2320 
2321 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2322 {
2323 	i915->display.dpll.ref_clks.ssc = 100000;
2324 	i915->display.dpll.ref_clks.nssc = 100000;
2325 	/* DSI non-SSC ref 19.2MHz */
2326 }
2327 
2328 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2329 			      const struct intel_dpll_hw_state *hw_state)
2330 {
2331 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2332 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2333 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2334 		    hw_state->ebb0,
2335 		    hw_state->ebb4,
2336 		    hw_state->pll0,
2337 		    hw_state->pll1,
2338 		    hw_state->pll2,
2339 		    hw_state->pll3,
2340 		    hw_state->pll6,
2341 		    hw_state->pll8,
2342 		    hw_state->pll9,
2343 		    hw_state->pll10,
2344 		    hw_state->pcsdw12);
2345 }
2346 
2347 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2348 	.enable = bxt_ddi_pll_enable,
2349 	.disable = bxt_ddi_pll_disable,
2350 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2351 	.get_freq = bxt_ddi_pll_get_freq,
2352 };
2353 
2354 static const struct dpll_info bxt_plls[] = {
2355 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2356 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2357 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2358 	{ },
2359 };
2360 
2361 static const struct intel_dpll_mgr bxt_pll_mgr = {
2362 	.dpll_info = bxt_plls,
2363 	.compute_dplls = bxt_compute_dpll,
2364 	.get_dplls = bxt_get_dpll,
2365 	.put_dplls = intel_put_dpll,
2366 	.update_ref_clks = bxt_update_dpll_ref_clks,
2367 	.dump_hw_state = bxt_dump_hw_state,
2368 };
2369 
2370 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2371 				      int *qdiv, int *kdiv)
2372 {
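	/*
	 * Decompose bestdiv so that bestdiv == pdiv * qdiv * kdiv,
	 * e.g. 28 -> pdiv 2, qdiv 7, kdiv 2 and 15 -> pdiv 5, qdiv 1, kdiv 3.
	 */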
2373 	/* even dividers */
2374 	if (bestdiv % 2 == 0) {
2375 		if (bestdiv == 2) {
2376 			*pdiv = 2;
2377 			*qdiv = 1;
2378 			*kdiv = 1;
2379 		} else if (bestdiv % 4 == 0) {
2380 			*pdiv = 2;
2381 			*qdiv = bestdiv / 4;
2382 			*kdiv = 2;
2383 		} else if (bestdiv % 6 == 0) {
2384 			*pdiv = 3;
2385 			*qdiv = bestdiv / 6;
2386 			*kdiv = 2;
2387 		} else if (bestdiv % 5 == 0) {
2388 			*pdiv = 5;
2389 			*qdiv = bestdiv / 10;
2390 			*kdiv = 2;
2391 		} else if (bestdiv % 14 == 0) {
2392 			*pdiv = 7;
2393 			*qdiv = bestdiv / 14;
2394 			*kdiv = 2;
2395 		}
2396 	} else {
2397 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2398 			*pdiv = bestdiv;
2399 			*qdiv = 1;
2400 			*kdiv = 1;
2401 		} else { /* 9, 15, 21 */
2402 			*pdiv = bestdiv / 3;
2403 			*qdiv = 1;
2404 			*kdiv = 3;
2405 		}
2406 	}
2407 }
2408 
2409 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2410 				      u32 dco_freq, u32 ref_freq,
2411 				      int pdiv, int qdiv, int kdiv)
2412 {
2413 	u32 dco;
2414 
2415 	switch (kdiv) {
2416 	case 1:
2417 		params->kdiv = 1;
2418 		break;
2419 	case 2:
2420 		params->kdiv = 2;
2421 		break;
2422 	case 3:
2423 		params->kdiv = 4;
2424 		break;
2425 	default:
2426 		WARN(1, "Incorrect KDiv\n");
2427 	}
2428 
2429 	switch (pdiv) {
2430 	case 2:
2431 		params->pdiv = 1;
2432 		break;
2433 	case 3:
2434 		params->pdiv = 2;
2435 		break;
2436 	case 5:
2437 		params->pdiv = 4;
2438 		break;
2439 	case 7:
2440 		params->pdiv = 8;
2441 		break;
2442 	default:
2443 		WARN(1, "Incorrect PDiv\n");
2444 	}
2445 
2446 	WARN_ON(kdiv != 2 && qdiv != 1);
2447 
2448 	params->qdiv_ratio = qdiv;
2449 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2450 
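	/*
	 * The DCO ratio (DCO frequency / reference) is stored with a 15-bit
	 * binary fraction, e.g. dco_freq = 8100000 kHz and ref_freq = 24000 kHz
	 * give a ratio of 337.5, i.e. dco_integer = 0x151 and dco_fraction =
	 * 0x4000 (cf. the icl_dp_combo_pll_24MHz_values table below).
	 */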
2451 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2452 
2453 	params->dco_integer = dco >> 15;
2454 	params->dco_fraction = dco & 0x7fff;
2455 }
2456 
2457 /*
2458  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2459  * Program half of the nominal DCO divider fraction value.
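 * The halving is done in icl_calc_dpll_state(); icl_ddi_combo_pll_get_freq()
 * doubles the fraction back when reading the frequency out.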
2460  */
2461 static bool
2462 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2463 {
2464 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2465 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2466 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2467 		 i915->display.dpll.ref_clks.nssc == 38400;
2468 }
2469 
2470 struct icl_combo_pll_params {
2471 	int clock;
2472 	struct skl_wrpll_params wrpll;
2473 };
2474 
2475 /*
2476  * These values are already adjusted: they're the bits we write to the
2477  * registers, not the logical values.
2478  */
2479 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2480 	{ 540000,
2481 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2482 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2483 	{ 270000,
2484 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2485 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2486 	{ 162000,
2487 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2488 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2489 	{ 324000,
2490 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2491 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2492 	{ 216000,
2493 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2494 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2495 	{ 432000,
2496 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2497 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2498 	{ 648000,
2499 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2500 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2501 	{ 810000,
2502 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2503 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2504 };
2505 
2507 /* Also used for 38.4 MHz values. */
2508 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2509 	{ 540000,
2510 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2511 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2512 	{ 270000,
2513 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2514 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2515 	{ 162000,
2516 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2517 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2518 	{ 324000,
2519 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2520 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2521 	{ 216000,
2522 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2523 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2524 	{ 432000,
2525 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2526 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2527 	{ 648000,
2528 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2529 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2530 	{ 810000,
2531 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2532 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2533 };
2534 
2535 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2536 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2537 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2538 };
2539 
2540 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2541 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2542 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2543 };
2544 
2545 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2546 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2547 	/* the following params are unused */
2548 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2549 };
2550 
2551 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2552 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2553 	/* the following params are unused */
2554 };
2555 
2556 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2557 				 struct skl_wrpll_params *pll_params)
2558 {
2559 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2560 	const struct icl_combo_pll_params *params =
2561 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2562 		icl_dp_combo_pll_24MHz_values :
2563 		icl_dp_combo_pll_19_2MHz_values;
2564 	int clock = crtc_state->port_clock;
2565 	int i;
2566 
2567 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2568 		if (clock == params[i].clock) {
2569 			*pll_params = params[i].wrpll;
2570 			return 0;
2571 		}
2572 	}
2573 
2574 	MISSING_CASE(clock);
2575 	return -EINVAL;
2576 }
2577 
2578 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2579 			    struct skl_wrpll_params *pll_params)
2580 {
2581 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2582 
2583 	if (DISPLAY_VER(dev_priv) >= 12) {
2584 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2585 		default:
2586 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2587 			fallthrough;
2588 		case 19200:
2589 		case 38400:
2590 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2591 			break;
2592 		case 24000:
2593 			*pll_params = tgl_tbt_pll_24MHz_values;
2594 			break;
2595 		}
2596 	} else {
2597 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2598 		default:
2599 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2600 			fallthrough;
2601 		case 19200:
2602 		case 38400:
2603 			*pll_params = icl_tbt_pll_19_2MHz_values;
2604 			break;
2605 		case 24000:
2606 			*pll_params = icl_tbt_pll_24MHz_values;
2607 			break;
2608 		}
2609 	}
2610 
2611 	return 0;
2612 }
2613 
2614 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2615 				    const struct intel_shared_dpll *pll,
2616 				    const struct intel_dpll_hw_state *pll_state)
2617 {
2618 	/*
2619 	 * The PLL outputs multiple frequencies at the same time; the selection
2620 	 * is made at the DDI clock mux level.
2621 	 */
2622 	drm_WARN_ON(&i915->drm, 1);
2623 
2624 	return 0;
2625 }
2626 
2627 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2628 {
2629 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2630 
2631 	/*
2632 	 * For ICL+, the spec states: if reference frequency is 38.4,
2633 	 * use 19.2 because the DPLL automatically divides that by 2.
2634 	 */
2635 	if (ref_clock == 38400)
2636 		ref_clock = 19200;
2637 
2638 	return ref_clock;
2639 }
2640 
2641 static int
2642 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2643 	       struct skl_wrpll_params *wrpll_params)
2644 {
2645 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2646 	int ref_clock = icl_wrpll_ref_clock(i915);
2647 	u32 afe_clock = crtc_state->port_clock * 5;
2648 	u32 dco_min = 7998000;
2649 	u32 dco_max = 10000000;
2650 	u32 dco_mid = (dco_min + dco_max) / 2;
2651 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2652 					 18, 20, 24, 28, 30, 32,  36,  40,
2653 					 42, 44, 48, 50, 52, 54,  56,  60,
2654 					 64, 66, 68, 70, 72, 76,  78,  80,
2655 					 84, 88, 90, 92, 96, 98, 100, 102,
2656 					  3,  5,  7,  9, 15, 21 };
2657 	u32 dco, best_dco = 0, dco_centrality = 0;
2658 	u32 best_dco_centrality = U32_MAX; /* the spec's initial value of 999999 MHz */
2659 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2660 
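	/*
	 * Example: a 594 MHz HDMI port clock gives afe_clock = 2970000 kHz;
	 * the only divider that lands in the DCO range is 3 (DCO = 8910000 kHz),
	 * which icl_wrpll_get_multipliers() splits into pdiv = 3, qdiv = 1,
	 * kdiv = 1.
	 */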
2661 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2662 		dco = afe_clock * dividers[d];
2663 
2664 		if (dco <= dco_max && dco >= dco_min) {
2665 			dco_centrality = abs(dco - dco_mid);
2666 
2667 			if (dco_centrality < best_dco_centrality) {
2668 				best_dco_centrality = dco_centrality;
2669 				best_div = dividers[d];
2670 				best_dco = dco;
2671 			}
2672 		}
2673 	}
2674 
2675 	if (best_div == 0)
2676 		return -EINVAL;
2677 
2678 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2679 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2680 				  pdiv, qdiv, kdiv);
2681 
2682 	return 0;
2683 }
2684 
2685 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2686 				      const struct intel_shared_dpll *pll,
2687 				      const struct intel_dpll_hw_state *pll_state)
2688 {
2689 	int ref_clock = icl_wrpll_ref_clock(i915);
2690 	u32 dco_fraction;
2691 	u32 p0, p1, p2, dco_freq;
2692 
2693 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2694 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2695 
2696 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2697 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2698 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2699 	else
2700 		p1 = 1;
2701 
2702 	switch (p0) {
2703 	case DPLL_CFGCR1_PDIV_2:
2704 		p0 = 2;
2705 		break;
2706 	case DPLL_CFGCR1_PDIV_3:
2707 		p0 = 3;
2708 		break;
2709 	case DPLL_CFGCR1_PDIV_5:
2710 		p0 = 5;
2711 		break;
2712 	case DPLL_CFGCR1_PDIV_7:
2713 		p0 = 7;
2714 		break;
2715 	}
2716 
2717 	switch (p2) {
2718 	case DPLL_CFGCR1_KDIV_1:
2719 		p2 = 1;
2720 		break;
2721 	case DPLL_CFGCR1_KDIV_2:
2722 		p2 = 2;
2723 		break;
2724 	case DPLL_CFGCR1_KDIV_3:
2725 		p2 = 3;
2726 		break;
2727 	}
2728 
2729 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2730 		   ref_clock;
2731 
2732 	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2733 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2734 
2735 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2736 		dco_fraction *= 2;
2737 
2738 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2739 
2740 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2741 		return 0;
2742 
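	/* port clock = AFE clock / 5 = DCO / (p0 * p1 * p2) / 5 */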
2743 	return dco_freq / (p0 * p1 * p2 * 5);
2744 }
2745 
2746 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2747 				const struct skl_wrpll_params *pll_params,
2748 				struct intel_dpll_hw_state *pll_state)
2749 {
2750 	u32 dco_fraction = pll_params->dco_fraction;
2751 
2752 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2753 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2754 
2755 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2756 			    pll_params->dco_integer;
2757 
2758 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2759 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2760 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2761 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2762 
2763 	if (DISPLAY_VER(i915) >= 12)
2764 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2765 	else
2766 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2767 
2768 	if (i915->display.vbt.override_afc_startup)
2769 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2770 }
2771 
2772 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2773 				    u32 *target_dco_khz,
2774 				    struct intel_dpll_hw_state *state,
2775 				    bool is_dkl)
2776 {
2777 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2778 	u32 dco_min_freq, dco_max_freq;
2779 	unsigned int i;
2780 	int div2;
2781 
2782 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2783 	dco_max_freq = is_dp ? 8100000 : 10000000;
2784 
2785 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2786 		int div1 = div1_vals[i];
2787 
2788 		for (div2 = 10; div2 > 0; div2--) {
2789 			int dco = div1 * div2 * clock_khz * 5;
2790 			int a_divratio, tlinedrv, inputsel;
2791 			u32 hsdiv;
2792 
2793 			if (dco < dco_min_freq || dco > dco_max_freq)
2794 				continue;
2795 
2796 			if (div2 >= 2) {
2797 				/*
2798 				 * Note: a_divratio does not match the TGL
2799 				 * BSpec algorithm, but it matches the hardcoded
2800 				 * values and works on HW, at least for DP alt-mode.
2801 				 */
2802 				a_divratio = is_dp ? 10 : 5;
2803 				tlinedrv = is_dkl ? 1 : 2;
2804 			} else {
2805 				a_divratio = 5;
2806 				tlinedrv = 0;
2807 			}
2808 			inputsel = is_dp ? 0 : 1;
2809 
2810 			switch (div1) {
2811 			default:
2812 				MISSING_CASE(div1);
2813 				fallthrough;
2814 			case 2:
2815 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2816 				break;
2817 			case 3:
2818 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2819 				break;
2820 			case 5:
2821 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2822 				break;
2823 			case 7:
2824 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2825 				break;
2826 			}
2827 
2828 			*target_dco_khz = dco;
2829 
2830 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2831 
2832 			state->mg_clktop2_coreclkctl1 =
2833 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2834 
2835 			state->mg_clktop2_hsclkctl =
2836 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2837 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2838 				hsdiv |
2839 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2840 
2841 			return 0;
2842 		}
2843 	}
2844 
2845 	return -EINVAL;
2846 }
2847 
2848 /*
2849  * The specification for this function uses real numbers, so the math had to be
2850  * adapted to integer-only calculation, which is why it looks so different.
2851  */
2852 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2853 				 struct intel_dpll_hw_state *pll_state)
2854 {
2855 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2856 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2857 	int clock = crtc_state->port_clock;
2858 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2859 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2860 	u32 prop_coeff, int_coeff;
2861 	u32 tdc_targetcnt, feedfwgain;
2862 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2863 	u64 tmp;
2864 	bool use_ssc = false;
2865 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2866 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2867 	int ret;
2868 
2869 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2870 				       pll_state, is_dkl);
2871 	if (ret)
2872 		return ret;
2873 
2874 	m1div = 2;
2875 	m2div_int = dco_khz / (refclk_khz * m1div);
2876 	if (m2div_int > 255) {
2877 		if (!is_dkl) {
2878 			m1div = 4;
2879 			m2div_int = dco_khz / (refclk_khz * m1div);
2880 		}
2881 
2882 		if (m2div_int > 255)
2883 			return -EINVAL;
2884 	}
2885 	m2div_rem = dco_khz % (refclk_khz * m1div);
2886 
2887 	tmp = (u64)m2div_rem * (1 << 22);
2888 	do_div(tmp, refclk_khz * m1div);
2889 	m2div_frac = tmp;
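	/*
	 * e.g. dco_khz == 8100000, refclk_khz == 19200, m1div == 2:
	 * m2div_int == 210, m2div_rem == 36000, m2div_frac == 0x3c0000
	 */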
2890 
2891 	switch (refclk_khz) {
2892 	case 19200:
2893 		iref_ndiv = 1;
2894 		iref_trim = 28;
2895 		iref_pulse_w = 1;
2896 		break;
2897 	case 24000:
2898 		iref_ndiv = 1;
2899 		iref_trim = 25;
2900 		iref_pulse_w = 2;
2901 		break;
2902 	case 38400:
2903 		iref_ndiv = 2;
2904 		iref_trim = 28;
2905 		iref_pulse_w = 1;
2906 		break;
2907 	default:
2908 		MISSING_CASE(refclk_khz);
2909 		return -EINVAL;
2910 	}
2911 
2912 	/*
2913 	 * tdc_res = 0.000003
2914 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2915 	 *
2916 	 * The multiplication by 1000 is due to the refclk MHz to kHz conversion.
2917 	 * It was supposed to be a division, but we rearranged the operations of
2918 	 * the formula to avoid early divisions, so that rounding errors are not
2919 	 * amplified.
2920 	 *
2921 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2922 	 * we also rearrange to work with integers.
2923 	 *
2924 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2925 	 * last division by 10.
2926 	 */
2927 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
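	/* e.g. refclk_khz == 19200 yields tdc_targetcnt == 79 */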
2928 
2929 	/*
2930 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2931 	 * 32 bits. That's not a problem since we round the division down
2932 	 * anyway.
2933 	 */
2934 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2935 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
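	/* e.g. m1div == 2 and dco_khz == 8100000 yield feedfwgain == 82 */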
2936 
2937 	if (dco_khz >= 9000000) {
2938 		prop_coeff = 5;
2939 		int_coeff = 10;
2940 	} else {
2941 		prop_coeff = 4;
2942 		int_coeff = 8;
2943 	}
2944 
2945 	if (use_ssc) {
2946 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2947 		do_div(tmp, refclk_khz * m1div * 10000);
2948 		ssc_stepsize = tmp;
2949 
2950 		tmp = mul_u32_u32(dco_khz, 1000);
2951 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2952 	} else {
2953 		ssc_stepsize = 0;
2954 		ssc_steplen = 0;
2955 	}
2956 	ssc_steplog = 4;
2957 
2958 	/* write the calculated values to pll_state */
2959 	if (is_dkl) {
2960 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2961 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2962 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2963 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2964 		if (dev_priv->display.vbt.override_afc_startup) {
2965 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2966 
2967 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2968 		}
2969 
2970 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2971 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2972 
2973 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2974 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2975 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2976 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2977 
2978 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2979 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2980 
2981 		pll_state->mg_pll_tdc_coldst_bias =
2982 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2983 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2984 
2985 	} else {
2986 		pll_state->mg_pll_div0 =
2987 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2988 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2989 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2990 
2991 		pll_state->mg_pll_div1 =
2992 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2993 			MG_PLL_DIV1_DITHER_DIV_2 |
2994 			MG_PLL_DIV1_NDIVRATIO(1) |
2995 			MG_PLL_DIV1_FBPREDIV(m1div);
2996 
2997 		pll_state->mg_pll_lf =
2998 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2999 			MG_PLL_LF_AFCCNTSEL_512 |
3000 			MG_PLL_LF_GAINCTRL(1) |
3001 			MG_PLL_LF_INT_COEFF(int_coeff) |
3002 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3003 
3004 		pll_state->mg_pll_frac_lock =
3005 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3006 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3007 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3008 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3009 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3010 		if (use_ssc || m2div_rem > 0)
3011 			pll_state->mg_pll_frac_lock |=
3012 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3013 
3014 		pll_state->mg_pll_ssc =
3015 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3016 			MG_PLL_SSC_TYPE(2) |
3017 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3018 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3019 			MG_PLL_SSC_FLLEN |
3020 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3021 
3022 		pll_state->mg_pll_tdc_coldst_bias =
3023 			MG_PLL_TDC_COLDST_COLDSTART |
3024 			MG_PLL_TDC_COLDST_IREFINT_EN |
3025 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3026 			MG_PLL_TDC_TDCOVCCORR_EN |
3027 			MG_PLL_TDC_TDCSEL(3);
3028 
3029 		pll_state->mg_pll_bias =
3030 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3031 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3032 			MG_PLL_BIAS_BIAS_BONUS(10) |
3033 			MG_PLL_BIAS_BIASCAL_EN |
3034 			MG_PLL_BIAS_CTRIM(12) |
3035 			MG_PLL_BIAS_VREF_RDAC(4) |
3036 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3037 
3038 		if (refclk_khz == 38400) {
3039 			pll_state->mg_pll_tdc_coldst_bias_mask =
3040 				MG_PLL_TDC_COLDST_COLDSTART;
3041 			pll_state->mg_pll_bias_mask = 0;
3042 		} else {
3043 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3044 			pll_state->mg_pll_bias_mask = -1U;
3045 		}
3046 
3047 		pll_state->mg_pll_tdc_coldst_bias &=
3048 			pll_state->mg_pll_tdc_coldst_bias_mask;
3049 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3050 	}
3051 
3052 	return 0;
3053 }
3054 
3055 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3056 				   const struct intel_shared_dpll *pll,
3057 				   const struct intel_dpll_hw_state *pll_state)
3058 {
3059 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3060 	u64 tmp;
3061 
3062 	ref_clock = dev_priv->display.dpll.ref_clks.nssc;
3063 
3064 	if (DISPLAY_VER(dev_priv) >= 12) {
3065 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3066 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3067 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3068 
3069 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3070 			m2_frac = pll_state->mg_pll_bias &
3071 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3072 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3073 		} else {
3074 			m2_frac = 0;
3075 		}
3076 	} else {
3077 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3078 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3079 
3080 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3081 			m2_frac = pll_state->mg_pll_div0 &
3082 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3083 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3084 		} else {
3085 			m2_frac = 0;
3086 		}
3087 	}
3088 
3089 	switch (pll_state->mg_clktop2_hsclkctl &
3090 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3091 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3092 		div1 = 2;
3093 		break;
3094 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3095 		div1 = 3;
3096 		break;
3097 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3098 		div1 = 5;
3099 		break;
3100 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3101 		div1 = 7;
3102 		break;
3103 	default:
3104 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3105 		return 0;
3106 	}
3107 
3108 	div2 = (pll_state->mg_clktop2_hsclkctl &
3109 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3110 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3111 
3112 	/* a div2 value of 0 is the same as 1, i.e. no division */
3113 	if (div2 == 0)
3114 		div2 = 1;
3115 
3116 	/*
3117 	 * Adjust the original formula to delay the division by 2^22 in order to
3118 	 * minimize possible rounding errors.
3119 	 */
3120 	tmp = (u64)m1 * m2_int * ref_clock +
3121 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3122 	tmp = div_u64(tmp, 5 * div1 * div2);
3123 
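	/*
	 * e.g. m1 == 2, m2_int == 210, m2_frac == 0x3c0000, ref_clock == 19200,
	 * div1 == 2, div2 == 1 yields 810000 kHz (DP 8.1 Gbps).
	 */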
3124 	return tmp;
3125 }
3126 
3127 /**
3128  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3129  * @crtc_state: state for the CRTC to select the DPLL for
3130  * @port_dpll_id: the active @port_dpll_id to select
3131  *
3132  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3133  * CRTC.
3134  */
3135 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3136 			      enum icl_port_dpll_id port_dpll_id)
3137 {
3138 	struct icl_port_dpll *port_dpll =
3139 		&crtc_state->icl_port_dplls[port_dpll_id];
3140 
3141 	crtc_state->shared_dpll = port_dpll->pll;
3142 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3143 }
3144 
3145 static void icl_update_active_dpll(struct intel_atomic_state *state,
3146 				   struct intel_crtc *crtc,
3147 				   struct intel_encoder *encoder)
3148 {
3149 	struct intel_crtc_state *crtc_state =
3150 		intel_atomic_get_new_crtc_state(state, crtc);
3151 	struct intel_digital_port *primary_port;
3152 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3153 
3154 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3155 		enc_to_mst(encoder)->primary :
3156 		enc_to_dig_port(encoder);
3157 
3158 	if (primary_port &&
3159 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3160 	     intel_tc_port_in_legacy_mode(primary_port)))
3161 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3162 
3163 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3164 }
3165 
3166 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3167 {
3168 	if (!(i915->hti_state & HDPORT_ENABLED))
3169 		return 0;
3170 
3171 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3172 }
3173 
3174 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3175 				      struct intel_crtc *crtc)
3176 {
3177 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3178 	struct intel_crtc_state *crtc_state =
3179 		intel_atomic_get_new_crtc_state(state, crtc);
3180 	struct icl_port_dpll *port_dpll =
3181 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3182 	struct skl_wrpll_params pll_params = {};
3183 	int ret;
3184 
3185 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3186 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3187 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3188 	else
3189 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3190 
3191 	if (ret)
3192 		return ret;
3193 
3194 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3195 
3196 	/* this is mainly for the fastset check */
3197 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3198 
3199 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3200 							    &port_dpll->hw_state);
3201 
3202 	return 0;
3203 }
3204 
3205 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3206 				  struct intel_crtc *crtc,
3207 				  struct intel_encoder *encoder)
3208 {
3209 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3210 	struct intel_crtc_state *crtc_state =
3211 		intel_atomic_get_new_crtc_state(state, crtc);
3212 	struct icl_port_dpll *port_dpll =
3213 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3214 	enum port port = encoder->port;
3215 	unsigned long dpll_mask;
3216 
3217 	if (IS_ALDERLAKE_S(dev_priv)) {
3218 		dpll_mask =
3219 			BIT(DPLL_ID_DG1_DPLL3) |
3220 			BIT(DPLL_ID_DG1_DPLL2) |
3221 			BIT(DPLL_ID_ICL_DPLL1) |
3222 			BIT(DPLL_ID_ICL_DPLL0);
3223 	} else if (IS_DG1(dev_priv)) {
3224 		if (port == PORT_D || port == PORT_E) {
3225 			dpll_mask =
3226 				BIT(DPLL_ID_DG1_DPLL2) |
3227 				BIT(DPLL_ID_DG1_DPLL3);
3228 		} else {
3229 			dpll_mask =
3230 				BIT(DPLL_ID_DG1_DPLL0) |
3231 				BIT(DPLL_ID_DG1_DPLL1);
3232 		}
3233 	} else if (IS_ROCKETLAKE(dev_priv)) {
3234 		dpll_mask =
3235 			BIT(DPLL_ID_EHL_DPLL4) |
3236 			BIT(DPLL_ID_ICL_DPLL1) |
3237 			BIT(DPLL_ID_ICL_DPLL0);
3238 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3239 		dpll_mask =
3240 			BIT(DPLL_ID_EHL_DPLL4) |
3241 			BIT(DPLL_ID_ICL_DPLL1) |
3242 			BIT(DPLL_ID_ICL_DPLL0);
3243 	} else {
3244 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3245 	}
3246 
3247 	/* Eliminate DPLLs from consideration if reserved by HTI */
3248 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3249 
3250 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3251 						&port_dpll->hw_state,
3252 						dpll_mask);
3253 	if (!port_dpll->pll)
3254 		return -EINVAL;
3255 
3256 	intel_reference_shared_dpll(state, crtc,
3257 				    port_dpll->pll, &port_dpll->hw_state);
3258 
3259 	icl_update_active_dpll(state, crtc, encoder);
3260 
3261 	return 0;
3262 }
3263 
3264 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3265 				    struct intel_crtc *crtc)
3266 {
3267 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3268 	struct intel_crtc_state *crtc_state =
3269 		intel_atomic_get_new_crtc_state(state, crtc);
3270 	struct icl_port_dpll *port_dpll =
3271 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3272 	struct skl_wrpll_params pll_params = {};
3273 	int ret;
3274 
3275 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3276 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3277 	if (ret)
3278 		return ret;
3279 
3280 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3281 
3282 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3283 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3284 	if (ret)
3285 		return ret;
3286 
3287 	/* this is mainly for the fastset check */
3288 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3289 
3290 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3291 							 &port_dpll->hw_state);
3292 
3293 	return 0;
3294 }
3295 
3296 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3297 				struct intel_crtc *crtc,
3298 				struct intel_encoder *encoder)
3299 {
3300 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3301 	struct intel_crtc_state *crtc_state =
3302 		intel_atomic_get_new_crtc_state(state, crtc);
3303 	struct icl_port_dpll *port_dpll =
3304 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3305 	enum intel_dpll_id dpll_id;
3306 	int ret;
3307 
3308 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3309 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3310 						&port_dpll->hw_state,
3311 						BIT(DPLL_ID_ICL_TBTPLL));
3312 	if (!port_dpll->pll)
3313 		return -EINVAL;
3314 	intel_reference_shared_dpll(state, crtc,
3315 				    port_dpll->pll, &port_dpll->hw_state);
3316 
3318 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3319 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3320 							 encoder->port));
3321 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3322 						&port_dpll->hw_state,
3323 						BIT(dpll_id));
3324 	if (!port_dpll->pll) {
3325 		ret = -EINVAL;
3326 		goto err_unreference_tbt_pll;
3327 	}
3328 	intel_reference_shared_dpll(state, crtc,
3329 				    port_dpll->pll, &port_dpll->hw_state);
3330 
3331 	icl_update_active_dpll(state, crtc, encoder);
3332 
3333 	return 0;
3334 
3335 err_unreference_tbt_pll:
3336 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3337 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3338 
3339 	return ret;
3340 }
3341 
3342 static int icl_compute_dplls(struct intel_atomic_state *state,
3343 			     struct intel_crtc *crtc,
3344 			     struct intel_encoder *encoder)
3345 {
3346 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3347 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3348 
3349 	if (intel_phy_is_combo(dev_priv, phy))
3350 		return icl_compute_combo_phy_dpll(state, crtc);
3351 	else if (intel_phy_is_tc(dev_priv, phy))
3352 		return icl_compute_tc_phy_dplls(state, crtc);
3353 
3354 	MISSING_CASE(phy);
3355 
3356 	return 0;
3357 }
3358 
3359 static int icl_get_dplls(struct intel_atomic_state *state,
3360 			 struct intel_crtc *crtc,
3361 			 struct intel_encoder *encoder)
3362 {
3363 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3364 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3365 
3366 	if (intel_phy_is_combo(dev_priv, phy))
3367 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3368 	else if (intel_phy_is_tc(dev_priv, phy))
3369 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3370 
3371 	MISSING_CASE(phy);
3372 
3373 	return -EINVAL;
3374 }
3375 
3376 static void icl_put_dplls(struct intel_atomic_state *state,
3377 			  struct intel_crtc *crtc)
3378 {
3379 	const struct intel_crtc_state *old_crtc_state =
3380 		intel_atomic_get_old_crtc_state(state, crtc);
3381 	struct intel_crtc_state *new_crtc_state =
3382 		intel_atomic_get_new_crtc_state(state, crtc);
3383 	enum icl_port_dpll_id id;
3384 
3385 	new_crtc_state->shared_dpll = NULL;
3386 
3387 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3388 		const struct icl_port_dpll *old_port_dpll =
3389 			&old_crtc_state->icl_port_dplls[id];
3390 		struct icl_port_dpll *new_port_dpll =
3391 			&new_crtc_state->icl_port_dplls[id];
3392 
3393 		new_port_dpll->pll = NULL;
3394 
3395 		if (!old_port_dpll->pll)
3396 			continue;
3397 
3398 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3399 	}
3400 }
3401 
3402 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3403 				struct intel_shared_dpll *pll,
3404 				struct intel_dpll_hw_state *hw_state)
3405 {
3406 	const enum intel_dpll_id id = pll->info->id;
3407 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3408 	intel_wakeref_t wakeref;
3409 	bool ret = false;
3410 	u32 val;
3411 
3412 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3413 
3414 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3415 						     POWER_DOMAIN_DISPLAY_CORE);
3416 	if (!wakeref)
3417 		return false;
3418 
3419 	val = intel_de_read(dev_priv, enable_reg);
3420 	if (!(val & PLL_ENABLE))
3421 		goto out;
3422 
3423 	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3424 						  MG_REFCLKIN_CTL(tc_port));
3425 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3426 
3427 	hw_state->mg_clktop2_coreclkctl1 =
3428 		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3429 	hw_state->mg_clktop2_coreclkctl1 &=
3430 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3431 
3432 	hw_state->mg_clktop2_hsclkctl =
3433 		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3434 	hw_state->mg_clktop2_hsclkctl &=
3435 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3436 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3437 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3438 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3439 
3440 	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3441 	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3442 	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3443 	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3444 						   MG_PLL_FRAC_LOCK(tc_port));
3445 	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3446 
3447 	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3448 	hw_state->mg_pll_tdc_coldst_bias =
3449 		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3450 
3451 	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
3452 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3453 		hw_state->mg_pll_bias_mask = 0;
3454 	} else {
3455 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3456 		hw_state->mg_pll_bias_mask = -1U;
3457 	}
3458 
3459 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3460 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3461 
3462 	ret = true;
3463 out:
3464 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3465 	return ret;
3466 }
3467 
3468 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3469 				 struct intel_shared_dpll *pll,
3470 				 struct intel_dpll_hw_state *hw_state)
3471 {
3472 	const enum intel_dpll_id id = pll->info->id;
3473 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3474 	intel_wakeref_t wakeref;
3475 	bool ret = false;
3476 	u32 val;
3477 
3478 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3479 						     POWER_DOMAIN_DISPLAY_CORE);
3480 	if (!wakeref)
3481 		return false;
3482 
3483 	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
3484 	if (!(val & PLL_ENABLE))
3485 		goto out;
3486 
3487 	/*
3488 	 * All registers read here have the same HIP_INDEX_REG even though
3489 	 * they are on different building blocks
3490 	 */
3491 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
3492 						       DKL_REFCLKIN_CTL(tc_port));
3493 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3494 
3495 	hw_state->mg_clktop2_hsclkctl =
3496 		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3497 	hw_state->mg_clktop2_hsclkctl &=
3498 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3499 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3500 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3501 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3502 
3503 	hw_state->mg_clktop2_coreclkctl1 =
3504 		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3505 	hw_state->mg_clktop2_coreclkctl1 &=
3506 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3507 
3508 	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
3509 	val = DKL_PLL_DIV0_MASK;
3510 	if (dev_priv->display.vbt.override_afc_startup)
3511 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3512 	hw_state->mg_pll_div0 &= val;
3513 
3514 	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3515 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3516 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3517 
3518 	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3519 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3520 				 DKL_PLL_SSC_STEP_LEN_MASK |
3521 				 DKL_PLL_SSC_STEP_NUM_MASK |
3522 				 DKL_PLL_SSC_EN);
3523 
3524 	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3525 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3526 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3527 
3528 	hw_state->mg_pll_tdc_coldst_bias =
3529 		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3530 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3531 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3532 
3533 	ret = true;
3534 out:
3535 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3536 	return ret;
3537 }
3538 
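/*
 * Read out the combo/TBT PLL configuration: the platform specific CFGCR0 and
 * CFGCR1 registers, plus the DIV0 AFC startup field on display 12+ when the
 * VBT overrides it.
 */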
3539 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3540 				 struct intel_shared_dpll *pll,
3541 				 struct intel_dpll_hw_state *hw_state,
3542 				 i915_reg_t enable_reg)
3543 {
3544 	const enum intel_dpll_id id = pll->info->id;
3545 	intel_wakeref_t wakeref;
3546 	bool ret = false;
3547 	u32 val;
3548 
3549 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3550 						     POWER_DOMAIN_DISPLAY_CORE);
3551 	if (!wakeref)
3552 		return false;
3553 
3554 	val = intel_de_read(dev_priv, enable_reg);
3555 	if (!(val & PLL_ENABLE))
3556 		goto out;
3557 
3558 	if (IS_ALDERLAKE_S(dev_priv)) {
3559 		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
3560 		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
3561 	} else if (IS_DG1(dev_priv)) {
3562 		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
3563 		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
3564 	} else if (IS_ROCKETLAKE(dev_priv)) {
3565 		hw_state->cfgcr0 = intel_de_read(dev_priv,
3566 						 RKL_DPLL_CFGCR0(id));
3567 		hw_state->cfgcr1 = intel_de_read(dev_priv,
3568 						 RKL_DPLL_CFGCR1(id));
3569 	} else if (DISPLAY_VER(dev_priv) >= 12) {
3570 		hw_state->cfgcr0 = intel_de_read(dev_priv,
3571 						 TGL_DPLL_CFGCR0(id));
3572 		hw_state->cfgcr1 = intel_de_read(dev_priv,
3573 						 TGL_DPLL_CFGCR1(id));
3574 		if (dev_priv->display.vbt.override_afc_startup) {
3575 			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
3576 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3577 		}
3578 	} else {
3579 		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3580 			hw_state->cfgcr0 = intel_de_read(dev_priv,
3581 							 ICL_DPLL_CFGCR0(4));
3582 			hw_state->cfgcr1 = intel_de_read(dev_priv,
3583 							 ICL_DPLL_CFGCR1(4));
3584 		} else {
3585 			hw_state->cfgcr0 = intel_de_read(dev_priv,
3586 							 ICL_DPLL_CFGCR0(id));
3587 			hw_state->cfgcr1 = intel_de_read(dev_priv,
3588 							 ICL_DPLL_CFGCR1(id));
3589 		}
3590 	}
3591 
3592 	ret = true;
3593 out:
3594 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3595 	return ret;
3596 }
3597 
3598 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3599 				   struct intel_shared_dpll *pll,
3600 				   struct intel_dpll_hw_state *hw_state)
3601 {
3602 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3603 
3604 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3605 }
3606 
3607 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3608 				 struct intel_shared_dpll *pll,
3609 				 struct intel_dpll_hw_state *hw_state)
3610 {
3611 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3612 }
3613 
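/*
 * Program the platform specific CFGCR0/CFGCR1 registers and, where the VBT
 * overrides the AFC startup value, the DIV0 register.
 */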
3614 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3615 			   struct intel_shared_dpll *pll)
3616 {
3617 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3618 	const enum intel_dpll_id id = pll->info->id;
3619 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3620 
3621 	if (IS_ALDERLAKE_S(dev_priv)) {
3622 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3623 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3624 	} else if (IS_DG1(dev_priv)) {
3625 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3626 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3627 	} else if (IS_ROCKETLAKE(dev_priv)) {
3628 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3629 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3630 	} else if (DISPLAY_VER(dev_priv) >= 12) {
3631 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3632 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3633 		div0_reg = TGL_DPLL0_DIV0(id);
3634 	} else {
3635 		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3636 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3637 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3638 		} else {
3639 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3640 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3641 		}
3642 	}
3643 
3644 	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3645 	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3646 	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
3647 			 !i915_mmio_reg_valid(div0_reg));
3648 	if (dev_priv->display.vbt.override_afc_startup &&
3649 	    i915_mmio_reg_valid(div0_reg))
3650 		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
3651 			     hw_state->div0);
3652 	intel_de_posting_read(dev_priv, cfgcr1_reg);
3653 }
3654 
3655 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3656 			     struct intel_shared_dpll *pll)
3657 {
3658 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3659 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3660 	u32 val;
3661 
3662 	/*
3663 	 * Some of the following registers have reserved fields, so program
3664 	 * these with RMW based on a mask. The mask can be fixed or generated
3665 	 * during the calc/readout phase if the mask depends on some other HW
3666 	 * state like refclk, see icl_calc_mg_pll_state().
3667 	 */
3668 	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3669 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3670 	val |= hw_state->mg_refclkin_ctl;
3671 	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3672 
3673 	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3674 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3675 	val |= hw_state->mg_clktop2_coreclkctl1;
3676 	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3677 
3678 	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3679 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3680 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3681 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3682 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3683 	val |= hw_state->mg_clktop2_hsclkctl;
3684 	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
3685 
3686 	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3687 	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3688 	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3689 	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3690 		       hw_state->mg_pll_frac_lock);
3691 	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3692 
3693 	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3694 	val &= ~hw_state->mg_pll_bias_mask;
3695 	val |= hw_state->mg_pll_bias;
3696 	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
3697 
3698 	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3699 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3700 	val |= hw_state->mg_pll_tdc_coldst_bias;
3701 	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3702 
3703 	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3704 }
3705 
3706 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3707 			  struct intel_shared_dpll *pll)
3708 {
3709 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3710 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3711 	u32 val;
3712 
3713 	/*
3714 	 * All registers programmed here have the same HIP_INDEX_REG even
3715 	 * though they are on different building blocks
3716 	 */
3717 	/* All the registers are RMW */
3718 	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3719 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3720 	val |= hw_state->mg_refclkin_ctl;
3721 	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3722 
3723 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3724 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3725 	val |= hw_state->mg_clktop2_coreclkctl1;
3726 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3727 
3728 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3729 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3730 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3731 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3732 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3733 	val |= hw_state->mg_clktop2_hsclkctl;
3734 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3735 
3736 	val = DKL_PLL_DIV0_MASK;
3737 	if (dev_priv->display.vbt.override_afc_startup)
3738 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3739 	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
3740 			  hw_state->mg_pll_div0);
3741 
3742 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3743 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3744 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3745 	val |= hw_state->mg_pll_div1;
3746 	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3747 
3748 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3749 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3750 		 DKL_PLL_SSC_STEP_LEN_MASK |
3751 		 DKL_PLL_SSC_STEP_NUM_MASK |
3752 		 DKL_PLL_SSC_EN);
3753 	val |= hw_state->mg_pll_ssc;
3754 	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3755 
3756 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3757 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3758 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3759 	val |= hw_state->mg_pll_bias;
3760 	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3761 
3762 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3763 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3764 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3765 	val |= hw_state->mg_pll_tdc_coldst_bias;
3766 	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3767 
3768 	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3769 }
3770 
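/* Set PLL_POWER_ENABLE and wait for the PLL power state to be reported. */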
3771 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3772 				 struct intel_shared_dpll *pll,
3773 				 i915_reg_t enable_reg)
3774 {
3775 	u32 val;
3776 
3777 	val = intel_de_read(dev_priv, enable_reg);
3778 	val |= PLL_POWER_ENABLE;
3779 	intel_de_write(dev_priv, enable_reg, val);
3780 
3781 	/*
3782 	 * The spec says we need to "wait" but it also says it should be
3783 	 * immediate.
3784 	 */
3785 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3786 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3787 			pll->info->id);
3788 }
3789 
3790 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3791 			   struct intel_shared_dpll *pll,
3792 			   i915_reg_t enable_reg)
3793 {
3794 	u32 val;
3795 
3796 	val = intel_de_read(dev_priv, enable_reg);
3797 	val |= PLL_ENABLE;
3798 	intel_de_write(dev_priv, enable_reg, val);
3799 
3800 	/* Timeout is actually 600us. */
3801 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3802 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3803 }
3804 
3805 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3806 {
3807 	u32 val;
3808 
3809 	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
3810 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3811 		return;
3812 	/*
3813 	 * Wa_16011069516:adl-p[a0]
3814 	 *
3815 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3816 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3817 	 * sanity check this assumption with a double read, which presumably
3818 	 * returns the correct value even with clock gating on.
3819 	 *
3820 	 * Instead of the usual place for workarounds we apply this one here,
3821 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3822 	 */
3823 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3824 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3825 	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
3826 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3827 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3828 }
3829 
3830 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3831 			     struct intel_shared_dpll *pll)
3832 {
3833 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3834 
3835 	if (IS_JSL_EHL(dev_priv) &&
3836 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3837 
3838 		/*
3839 		 * We need to disable DC states when this DPLL is enabled.
3840 		 * This can be done by taking a reference on DPLL4 power
3841 		 * domain.
3842 		 */
3843 		pll->wakeref = intel_display_power_get(dev_priv,
3844 						       POWER_DOMAIN_DC_OFF);
3845 	}
3846 
3847 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3848 
3849 	icl_dpll_write(dev_priv, pll);
3850 
3851 	/*
3852 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3853 	 * paths should already be setting the appropriate voltage, hence we do
3854 	 * nothing here.
3855 	 */
3856 
3857 	icl_pll_enable(dev_priv, pll, enable_reg);
3858 
3859 	adlp_cmtg_clock_gating_wa(dev_priv, pll);
3860 
3861 	/* DVFS post sequence would be here. See the comment above. */
3862 }
3863 
3864 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3865 			   struct intel_shared_dpll *pll)
3866 {
3867 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3868 
3869 	icl_dpll_write(dev_priv, pll);
3870 
3871 	/*
3872 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3873 	 * paths should already be setting the appropriate voltage, hence we do
3874 	 * nothing here.
3875 	 */
3876 
3877 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3878 
3879 	/* DVFS post sequence would be here. See the comment above. */
3880 }
3881 
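/*
 * TC PLL enable sequence shared by the ICL MG PHY and the TGL+ Dekel PHY;
 * only the register programming helper differs.
 */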
3882 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3883 			  struct intel_shared_dpll *pll)
3884 {
3885 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3886 
3887 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3888 
3889 	if (DISPLAY_VER(dev_priv) >= 12)
3890 		dkl_pll_write(dev_priv, pll);
3891 	else
3892 		icl_mg_pll_write(dev_priv, pll);
3893 
3894 	/*
3895 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3896 	 * paths should already be setting the appropriate voltage, hence we do
3897 	 * nothing here.
3898 	 */
3899 
3900 	icl_pll_enable(dev_priv, pll, enable_reg);
3901 
3902 	/* DVFS post sequence would be here. See the comment above. */
3903 }
3904 
3905 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3906 			    struct intel_shared_dpll *pll,
3907 			    i915_reg_t enable_reg)
3908 {
3909 	u32 val;
3910 
3911 	/* The first steps are done by intel_ddi_post_disable(). */
3912 
3913 	/*
3914 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3915 	 * paths should already be setting the appropriate voltage, hence we do
3916 	 * nothing here.
3917 	 */
3918 
3919 	val = intel_de_read(dev_priv, enable_reg);
3920 	val &= ~PLL_ENABLE;
3921 	intel_de_write(dev_priv, enable_reg, val);
3922 
3923 	/* Timeout is actually 1us. */
3924 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3925 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3926 
3927 	/* DVFS post sequence would be here. See the comment above. */
3928 
3929 	val = intel_de_read(dev_priv, enable_reg);
3930 	val &= ~PLL_POWER_ENABLE;
3931 	intel_de_write(dev_priv, enable_reg, val);
3932 
3933 	/*
3934 	 * The spec says we need to "wait" but it also says it should be
3935 	 * immediate.
3936 	 */
3937 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3938 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3939 			pll->info->id);
3940 }
3941 
3942 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3943 			      struct intel_shared_dpll *pll)
3944 {
3945 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3946 
3947 	icl_pll_disable(dev_priv, pll, enable_reg);
3948 
3949 	if (IS_JSL_EHL(dev_priv) &&
3950 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3951 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3952 					pll->wakeref);
3953 }
3954 
3955 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3956 			    struct intel_shared_dpll *pll)
3957 {
3958 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3959 }
3960 
3961 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3962 			   struct intel_shared_dpll *pll)
3963 {
3964 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3965 
3966 	icl_pll_disable(dev_priv, pll, enable_reg);
3967 }
3968 
3969 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3970 {
3971 	/* No SSC ref */
3972 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3973 }
3974 
3975 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3976 			      const struct intel_dpll_hw_state *hw_state)
3977 {
3978 	drm_dbg_kms(&dev_priv->drm,
3979 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3980 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3981 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3982 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3983 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3984 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3985 		    hw_state->cfgcr0, hw_state->cfgcr1,
3986 		    hw_state->div0,
3987 		    hw_state->mg_refclkin_ctl,
3988 		    hw_state->mg_clktop2_coreclkctl1,
3989 		    hw_state->mg_clktop2_hsclkctl,
3990 		    hw_state->mg_pll_div0,
3991 		    hw_state->mg_pll_div1,
3992 		    hw_state->mg_pll_lf,
3993 		    hw_state->mg_pll_frac_lock,
3994 		    hw_state->mg_pll_ssc,
3995 		    hw_state->mg_pll_bias,
3996 		    hw_state->mg_pll_tdc_coldst_bias);
3997 }
3998 
3999 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
4000 	.enable = combo_pll_enable,
4001 	.disable = combo_pll_disable,
4002 	.get_hw_state = combo_pll_get_hw_state,
4003 	.get_freq = icl_ddi_combo_pll_get_freq,
4004 };
4005 
4006 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4007 	.enable = tbt_pll_enable,
4008 	.disable = tbt_pll_disable,
4009 	.get_hw_state = tbt_pll_get_hw_state,
4010 	.get_freq = icl_ddi_tbt_pll_get_freq,
4011 };
4012 
4013 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4014 	.enable = mg_pll_enable,
4015 	.disable = mg_pll_disable,
4016 	.get_hw_state = mg_pll_get_hw_state,
4017 	.get_freq = icl_ddi_mg_pll_get_freq,
4018 };
4019 
4020 static const struct dpll_info icl_plls[] = {
4021 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4022 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4023 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4024 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4025 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4026 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4027 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4028 	{ },
4029 };
4030 
4031 static const struct intel_dpll_mgr icl_pll_mgr = {
4032 	.dpll_info = icl_plls,
4033 	.compute_dplls = icl_compute_dplls,
4034 	.get_dplls = icl_get_dplls,
4035 	.put_dplls = icl_put_dplls,
4036 	.update_active_dpll = icl_update_active_dpll,
4037 	.update_ref_clks = icl_update_dpll_ref_clks,
4038 	.dump_hw_state = icl_dump_hw_state,
4039 };
4040 
4041 static const struct dpll_info ehl_plls[] = {
4042 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4043 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4044 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4045 	{ },
4046 };
4047 
4048 static const struct intel_dpll_mgr ehl_pll_mgr = {
4049 	.dpll_info = ehl_plls,
4050 	.compute_dplls = icl_compute_dplls,
4051 	.get_dplls = icl_get_dplls,
4052 	.put_dplls = icl_put_dplls,
4053 	.update_ref_clks = icl_update_dpll_ref_clks,
4054 	.dump_hw_state = icl_dump_hw_state,
4055 };
4056 
4057 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4058 	.enable = mg_pll_enable,
4059 	.disable = mg_pll_disable,
4060 	.get_hw_state = dkl_pll_get_hw_state,
4061 	.get_freq = icl_ddi_mg_pll_get_freq,
4062 };
4063 
4064 static const struct dpll_info tgl_plls[] = {
4065 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4066 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4067 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4068 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4069 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4070 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4071 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4072 	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4073 	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4074 	{ },
4075 };
4076 
4077 static const struct intel_dpll_mgr tgl_pll_mgr = {
4078 	.dpll_info = tgl_plls,
4079 	.compute_dplls = icl_compute_dplls,
4080 	.get_dplls = icl_get_dplls,
4081 	.put_dplls = icl_put_dplls,
4082 	.update_active_dpll = icl_update_active_dpll,
4083 	.update_ref_clks = icl_update_dpll_ref_clks,
4084 	.dump_hw_state = icl_dump_hw_state,
4085 };
4086 
4087 static const struct dpll_info rkl_plls[] = {
4088 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4089 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4090 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4091 	{ },
4092 };
4093 
4094 static const struct intel_dpll_mgr rkl_pll_mgr = {
4095 	.dpll_info = rkl_plls,
4096 	.compute_dplls = icl_compute_dplls,
4097 	.get_dplls = icl_get_dplls,
4098 	.put_dplls = icl_put_dplls,
4099 	.update_ref_clks = icl_update_dpll_ref_clks,
4100 	.dump_hw_state = icl_dump_hw_state,
4101 };
4102 
4103 static const struct dpll_info dg1_plls[] = {
4104 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4105 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4106 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4107 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4108 	{ },
4109 };
4110 
4111 static const struct intel_dpll_mgr dg1_pll_mgr = {
4112 	.dpll_info = dg1_plls,
4113 	.compute_dplls = icl_compute_dplls,
4114 	.get_dplls = icl_get_dplls,
4115 	.put_dplls = icl_put_dplls,
4116 	.update_ref_clks = icl_update_dpll_ref_clks,
4117 	.dump_hw_state = icl_dump_hw_state,
4118 };
4119 
4120 static const struct dpll_info adls_plls[] = {
4121 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4122 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4123 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4124 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4125 	{ },
4126 };
4127 
4128 static const struct intel_dpll_mgr adls_pll_mgr = {
4129 	.dpll_info = adls_plls,
4130 	.compute_dplls = icl_compute_dplls,
4131 	.get_dplls = icl_get_dplls,
4132 	.put_dplls = icl_put_dplls,
4133 	.update_ref_clks = icl_update_dpll_ref_clks,
4134 	.dump_hw_state = icl_dump_hw_state,
4135 };
4136 
4137 static const struct dpll_info adlp_plls[] = {
4138 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4139 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4140 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4141 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4142 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4143 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4144 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4145 	{ },
4146 };
4147 
4148 static const struct intel_dpll_mgr adlp_pll_mgr = {
4149 	.dpll_info = adlp_plls,
4150 	.compute_dplls = icl_compute_dplls,
4151 	.get_dplls = icl_get_dplls,
4152 	.put_dplls = icl_put_dplls,
4153 	.update_active_dpll = icl_update_active_dpll,
4154 	.update_ref_clks = icl_update_dpll_ref_clks,
4155 	.dump_hw_state = icl_dump_hw_state,
4156 };
4157 
4158 /**
4159  * intel_shared_dpll_init - Initialize shared DPLLs
4160  * @dev_priv: i915 device
4161  *
4162  * Initialize shared DPLLs for @dev_priv.
4163  */
4164 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4165 {
4166 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4167 	const struct dpll_info *dpll_info;
4168 	int i;
4169 
4170 	mutex_init(&dev_priv->display.dpll.lock);
4171 
4172 	if (IS_DG2(dev_priv))
4173 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4174 		dpll_mgr = NULL;
4175 	else if (IS_ALDERLAKE_P(dev_priv))
4176 		dpll_mgr = &adlp_pll_mgr;
4177 	else if (IS_ALDERLAKE_S(dev_priv))
4178 		dpll_mgr = &adls_pll_mgr;
4179 	else if (IS_DG1(dev_priv))
4180 		dpll_mgr = &dg1_pll_mgr;
4181 	else if (IS_ROCKETLAKE(dev_priv))
4182 		dpll_mgr = &rkl_pll_mgr;
4183 	else if (DISPLAY_VER(dev_priv) >= 12)
4184 		dpll_mgr = &tgl_pll_mgr;
4185 	else if (IS_JSL_EHL(dev_priv))
4186 		dpll_mgr = &ehl_pll_mgr;
4187 	else if (DISPLAY_VER(dev_priv) >= 11)
4188 		dpll_mgr = &icl_pll_mgr;
4189 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4190 		dpll_mgr = &bxt_pll_mgr;
4191 	else if (DISPLAY_VER(dev_priv) == 9)
4192 		dpll_mgr = &skl_pll_mgr;
4193 	else if (HAS_DDI(dev_priv))
4194 		dpll_mgr = &hsw_pll_mgr;
4195 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4196 		dpll_mgr = &pch_pll_mgr;
4197 
4198 	if (!dpll_mgr) {
4199 		dev_priv->display.dpll.num_shared_dpll = 0;
4200 		return;
4201 	}
4202 
4203 	dpll_info = dpll_mgr->dpll_info;
4204 
4205 	for (i = 0; dpll_info[i].name; i++) {
4206 		if (drm_WARN_ON(&dev_priv->drm,
4207 				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4208 			break;
4209 
4210 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4211 		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4212 	}
4213 
4214 	dev_priv->display.dpll.mgr = dpll_mgr;
4215 	dev_priv->display.dpll.num_shared_dpll = i;
4216 }
4217 
4218 /**
4219  * intel_compute_shared_dplls - compute DPLL state for CRTC and encoder combination
4220  * @state: atomic state
4221  * @crtc: CRTC to compute DPLLs for
4222  * @encoder: encoder
4223  *
4224  * This function computes the DPLL state for the given CRTC and encoder.
4225  *
4226  * The new configuration in the atomic commit @state is made effective by
4227  * calling intel_shared_dpll_swap_state().
4228  *
4229  * Returns:
4230  * 0 on success, negative error code on failure.
4231  */
4232 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4233 			       struct intel_crtc *crtc,
4234 			       struct intel_encoder *encoder)
4235 {
4236 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4237 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4238 
4239 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4240 		return -EINVAL;
4241 
4242 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4243 }
4244 
4245 /**
4246  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4247  * @state: atomic state
4248  * @crtc: CRTC to reserve DPLLs for
4249  * @encoder: encoder
4250  *
4251  * This function reserves all required DPLLs for the given CRTC and encoder
4252  * combination in the current atomic commit @state and the new @crtc atomic
4253  * state.
4254  *
4255  * The new configuration in the atomic commit @state is made effective by
4256  * calling intel_shared_dpll_swap_state().
4257  *
4258  * The reserved DPLLs should be released by calling
4259  * intel_release_shared_dplls().
4260  *
4261  * Returns:
4262  * 0 if all required DPLLs were successfully reserved,
4263  * negative error code otherwise.
4264  */
4265 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4266 			       struct intel_crtc *crtc,
4267 			       struct intel_encoder *encoder)
4268 {
4269 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4270 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4271 
4272 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4273 		return -EINVAL;
4274 
4275 	return dpll_mgr->get_dplls(state, crtc, encoder);
4276 }
4277 
4278 /**
4279  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4280  * @state: atomic state
4281  * @crtc: crtc from which the DPLLs are to be released
4282  *
4283  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4284  * from the current atomic commit @state and the old @crtc atomic state.
4285  *
4286  * The new configuration in the atomic commit @state is made effective by
4287  * calling intel_shared_dpll_swap_state().
4288  */
4289 void intel_release_shared_dplls(struct intel_atomic_state *state,
4290 				struct intel_crtc *crtc)
4291 {
4292 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4293 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4294 
4295 	/*
4296 	 * FIXME: this function is called for every platform having a
4297 	 * compute_clock hook, even though the platform doesn't yet support
4298 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4299 	 * called on those.
4300 	 */
4301 	if (!dpll_mgr)
4302 		return;
4303 
4304 	dpll_mgr->put_dplls(state, crtc);
4305 }
4306 
4307 /**
4308  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4309  * @state: atomic state
4310  * @crtc: the CRTC for which to update the active DPLL
4311  * @encoder: encoder determining the type of port DPLL
4312  *
4313  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4314  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4315  * DPLL selected will be based on the current mode of the encoder's port.
4316  */
4317 void intel_update_active_dpll(struct intel_atomic_state *state,
4318 			      struct intel_crtc *crtc,
4319 			      struct intel_encoder *encoder)
4320 {
4321 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4322 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4323 
4324 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4325 		return;
4326 
4327 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4328 }
4329 
4330 /**
4331  * intel_dpll_get_freq - calculate the DPLL's output frequency
4332  * @i915: i915 device
4333  * @pll: DPLL for which to calculate the output frequency
4334  * @pll_state: DPLL state from which to calculate the output frequency
4335  *
4336  * Return the output frequency corresponding to @pll's passed in @pll_state.
4337  */
4338 int intel_dpll_get_freq(struct drm_i915_private *i915,
4339 			const struct intel_shared_dpll *pll,
4340 			const struct intel_dpll_hw_state *pll_state)
4341 {
4342 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4343 		return 0;
4344 
4345 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4346 }
4347 
4348 /**
4349  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4350  * @i915: i915 device
4351  * @pll: DPLL for which to read out the hardware state
4352  * @hw_state: DPLL's hardware state
4353  *
4354  * Read out @pll's hardware state into @hw_state.
4355  */
4356 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4357 			     struct intel_shared_dpll *pll,
4358 			     struct intel_dpll_hw_state *hw_state)
4359 {
4360 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4361 }
4362 
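/*
 * Read out the PLL hardware state and reconstruct the pipe usage mask from
 * the active CRTC states.
 */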
4363 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4364 				  struct intel_shared_dpll *pll)
4365 {
4366 	struct intel_crtc *crtc;
4367 
4368 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4369 
4370 	if (IS_JSL_EHL(i915) && pll->on &&
4371 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4372 		pll->wakeref = intel_display_power_get(i915,
4373 						       POWER_DOMAIN_DC_OFF);
4374 	}
4375 
4376 	pll->state.pipe_mask = 0;
4377 	for_each_intel_crtc(&i915->drm, crtc) {
4378 		struct intel_crtc_state *crtc_state =
4379 			to_intel_crtc_state(crtc->base.state);
4380 
4381 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4382 			pll->state.pipe_mask |= BIT(crtc->pipe);
4383 	}
4384 	pll->active_mask = pll->state.pipe_mask;
4385 
4386 	drm_dbg_kms(&i915->drm,
4387 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4388 		    pll->info->name, pll->state.pipe_mask, pll->on);
4389 }
4390 
4391 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4392 {
4393 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4394 		i915->display.dpll.mgr->update_ref_clks(i915);
4395 }
4396 
4397 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4398 {
4399 	int i;
4400 
4401 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4402 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4403 }
4404 
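/*
 * Apply the CMTG clock gating workaround if needed and disable any PLL that
 * is enabled in hardware but not used by an active pipe.
 */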
4405 static void sanitize_dpll_state(struct drm_i915_private *i915,
4406 				struct intel_shared_dpll *pll)
4407 {
4408 	if (!pll->on)
4409 		return;
4410 
4411 	adlp_cmtg_clock_gating_wa(i915, pll);
4412 
4413 	if (pll->active_mask)
4414 		return;
4415 
4416 	drm_dbg_kms(&i915->drm,
4417 		    "%s enabled but not in use, disabling\n",
4418 		    pll->info->name);
4419 
4420 	pll->info->funcs->disable(i915, pll);
4421 	pll->on = false;
4422 }
4423 
4424 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4425 {
4426 	int i;
4427 
4428 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4429 		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4430 }
4431 
4432 /**
4433  * intel_dpll_dump_hw_state - write hw_state to dmesg
4434  * @dev_priv: i915 drm device
4435  * @hw_state: hw state to be written to the log
4436  *
4437  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4438  */
4439 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4440 			      const struct intel_dpll_hw_state *hw_state)
4441 {
4442 	if (dev_priv->display.dpll.mgr) {
4443 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4444 	} else {
4445 		/* fallback for platforms that don't use the shared dpll
4446 		 * infrastructure
4447 		 */
4448 		drm_dbg_kms(&dev_priv->drm,
4449 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4450 			    "fp0: 0x%x, fp1: 0x%x\n",
4451 			    hw_state->dpll,
4452 			    hw_state->dpll_md,
4453 			    hw_state->fp0,
4454 			    hw_state->fp1);
4455 	}
4456 }
4457 
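/*
 * Cross check the software PLL tracking (on, active_mask, pipe_mask and
 * hw_state) against the state read back from the hardware.
 */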
4458 static void
4459 verify_single_dpll_state(struct drm_i915_private *dev_priv,
4460 			 struct intel_shared_dpll *pll,
4461 			 struct intel_crtc *crtc,
4462 			 struct intel_crtc_state *new_crtc_state)
4463 {
4464 	struct intel_dpll_hw_state dpll_hw_state;
4465 	u8 pipe_mask;
4466 	bool active;
4467 
4468 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
4469 
4470 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
4471 
4472 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
4473 
4474 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4475 		I915_STATE_WARN(!pll->on && pll->active_mask,
4476 				"pll in active use but not on in sw tracking\n");
4477 		I915_STATE_WARN(pll->on && !pll->active_mask,
4478 				"pll is on but not used by any active pipe\n");
4479 		I915_STATE_WARN(pll->on != active,
4480 				"pll on state mismatch (expected %i, found %i)\n",
4481 				pll->on, active);
4482 	}
4483 
4484 	if (!crtc) {
4485 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
4486 				"more active pll users than references: 0x%x vs 0x%x\n",
4487 				pll->active_mask, pll->state.pipe_mask);
4488 
4489 		return;
4490 	}
4491 
4492 	pipe_mask = BIT(crtc->pipe);
4493 
4494 	if (new_crtc_state->hw.active)
4495 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
4496 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4497 				pipe_name(crtc->pipe), pll->active_mask);
4498 	else
4499 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4500 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4501 				pipe_name(crtc->pipe), pll->active_mask);
4502 
4503 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
4504 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4505 			pipe_mask, pll->state.pipe_mask);
4506 
4507 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
4508 					  &dpll_hw_state,
4509 					  sizeof(dpll_hw_state)),
4510 			"pll hw state mismatch\n");
4511 }
4512 
4513 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4514 				    struct intel_crtc_state *old_crtc_state,
4515 				    struct intel_crtc_state *new_crtc_state)
4516 {
4517 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4518 
4519 	if (new_crtc_state->shared_dpll)
4520 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4521 					 crtc, new_crtc_state);
4522 
4523 	if (old_crtc_state->shared_dpll &&
4524 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4525 		u8 pipe_mask = BIT(crtc->pipe);
4526 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4527 
4528 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4529 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4530 				pipe_name(crtc->pipe), pll->active_mask);
4531 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
4532 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4533 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4534 	}
4535 }
4536 
4537 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4538 {
4539 	int i;
4540 
4541 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4542 		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4543 					 NULL, NULL);
4544 }
4545