1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "intel_de.h"
27 #include "intel_display_types.h"
28 #include "intel_dpio_phy.h"
29 #include "intel_dpll.h"
30 #include "intel_dpll_mgr.h"
31 #include "intel_pch_refclk.h"
32 #include "intel_tc.h"
33 #include "intel_tc_phy_regs.h"
34 
35 /**
36  * DOC: Display PLLs
37  *
38  * Display PLLs used for driving outputs vary by platform. While some have
39  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
40  * from a pool. In the latter scenario, it is possible that multiple pipes
41  * share a PLL if their configurations match.
42  *
43  * This file provides an abstraction over display PLLs. The function
44  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
45  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
47  * for a given CRTC and encoder configuration by calling
48  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
49  * with intel_release_shared_dplls().
50  * Changes to the users are first staged in the atomic state, and then made
51  * effective by calling intel_shared_dpll_swap_state() during the atomic
52  * commit phase.
53  */
54 
55 /* platform specific hooks for managing DPLLs */
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true if the pll is
	 * currently enabled.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. Implementations return 0 when the frequency cannot be
	 * determined from @pll_state.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
89 
/* per-platform DPLL management vtable */
struct intel_dpll_mgr {
	/* Table of PLLs available on this platform; zero-terminated. */
	const struct dpll_info *dpll_info;

	/* Compute the DPLL state for @crtc/@encoder in the atomic state. */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* Reserve the DPLL(s) needed by @crtc/@encoder; 0 on success. */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* Release the DPLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Update which of the reserved DPLLs is active for @crtc/@encoder. */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Update dev_priv->display.dpll.ref_clks used by ->get_freq(). */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Log @hw_state for debugging. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
108 
109 static void
110 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
111 				  struct intel_shared_dpll_state *shared_dpll)
112 {
113 	enum intel_dpll_id i;
114 
115 	/* Copy shared dpll state */
116 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
117 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
118 
119 		shared_dpll[i] = pll->state;
120 	}
121 }
122 
123 static struct intel_shared_dpll_state *
124 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
125 {
126 	struct intel_atomic_state *state = to_intel_atomic_state(s);
127 
128 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
129 
130 	if (!state->dpll_set) {
131 		state->dpll_set = true;
132 
133 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
134 						  state->shared_dpll);
135 	}
136 
137 	return state->shared_dpll;
138 }
139 
140 /**
141  * intel_get_shared_dpll_by_id - get a DPLL given its id
142  * @dev_priv: i915 device instance
143  * @id: pll id
144  *
145  * Returns:
146  * A pointer to the DPLL with @id
147  */
148 struct intel_shared_dpll *
149 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
150 			    enum intel_dpll_id id)
151 {
152 	return &dev_priv->display.dpll.shared_dplls[id];
153 }
154 
155 /**
156  * intel_get_shared_dpll_id - get the id of a DPLL
157  * @dev_priv: i915 device instance
158  * @pll: the DPLL
159  *
160  * Returns:
161  * The id of @pll
162  */
163 enum intel_dpll_id
164 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
165 			 struct intel_shared_dpll *pll)
166 {
167 	long pll_idx = pll - dev_priv->display.dpll.shared_dplls;
168 
169 	if (drm_WARN_ON(&dev_priv->drm,
170 			pll_idx < 0 ||
171 			pll_idx >= dev_priv->display.dpll.num_shared_dpll))
172 		return -1;
173 
174 	return pll_idx;
175 }
176 
177 /* For ILK+ */
178 void assert_shared_dpll(struct drm_i915_private *dev_priv,
179 			struct intel_shared_dpll *pll,
180 			bool state)
181 {
182 	bool cur_state;
183 	struct intel_dpll_hw_state hw_state;
184 
185 	if (drm_WARN(&dev_priv->drm, !pll,
186 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
187 		return;
188 
189 	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
190 	I915_STATE_WARN(cur_state != state,
191 	     "%s assertion failure (expected %s, current %s)\n",
192 			pll->info->name, str_on_off(state),
193 			str_on_off(cur_state));
194 }
195 
196 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
197 {
198 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
199 }
200 
201 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
202 {
203 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
204 }
205 
206 static i915_reg_t
207 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
208 			   struct intel_shared_dpll *pll)
209 {
210 	if (IS_DG1(i915))
211 		return DG1_DPLL_ENABLE(pll->info->id);
212 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
213 		return MG_PLL_ENABLE(0);
214 
215 	return ICL_DPLL_ENABLE(pll->info->id);
216 }
217 
218 static i915_reg_t
219 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
220 			struct intel_shared_dpll *pll)
221 {
222 	const enum intel_dpll_id id = pll->info->id;
223 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
224 
225 	if (IS_ALDERLAKE_P(i915))
226 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
227 
228 	return MG_PLL_ENABLE(tc_port);
229 }
230 
231 /**
232  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
233  * @crtc_state: CRTC, and its state, which has a shared DPLL
234  *
235  * Enable the shared DPLL used by @crtc.
236  */
237 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
238 {
239 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
240 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
241 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
242 	unsigned int pipe_mask = BIT(crtc->pipe);
243 	unsigned int old_mask;
244 
245 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
246 		return;
247 
248 	mutex_lock(&dev_priv->display.dpll.lock);
249 	old_mask = pll->active_mask;
250 
251 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
252 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
253 		goto out;
254 
255 	pll->active_mask |= pipe_mask;
256 
257 	drm_dbg_kms(&dev_priv->drm,
258 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
259 		    pll->info->name, pll->active_mask, pll->on,
260 		    crtc->base.base.id, crtc->base.name);
261 
262 	if (old_mask) {
263 		drm_WARN_ON(&dev_priv->drm, !pll->on);
264 		assert_shared_dpll_enabled(dev_priv, pll);
265 		goto out;
266 	}
267 	drm_WARN_ON(&dev_priv->drm, pll->on);
268 
269 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
270 	pll->info->funcs->enable(dev_priv, pll);
271 	pll->on = true;
272 
273 out:
274 	mutex_unlock(&dev_priv->display.dpll.lock);
275 }
276 
277 /**
278  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
279  * @crtc_state: CRTC, and its state, which has a shared DPLL
280  *
281  * Disable the shared DPLL used by @crtc.
282  */
283 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
284 {
285 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
286 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
287 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
288 	unsigned int pipe_mask = BIT(crtc->pipe);
289 
290 	/* PCH only available on ILK+ */
291 	if (DISPLAY_VER(dev_priv) < 5)
292 		return;
293 
294 	if (pll == NULL)
295 		return;
296 
297 	mutex_lock(&dev_priv->display.dpll.lock);
298 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
299 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
300 		     crtc->base.base.id, crtc->base.name))
301 		goto out;
302 
303 	drm_dbg_kms(&dev_priv->drm,
304 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
305 		    pll->info->name, pll->active_mask, pll->on,
306 		    crtc->base.base.id, crtc->base.name);
307 
308 	assert_shared_dpll_enabled(dev_priv, pll);
309 	drm_WARN_ON(&dev_priv->drm, !pll->on);
310 
311 	pll->active_mask &= ~pipe_mask;
312 	if (pll->active_mask)
313 		goto out;
314 
315 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
316 	pll->info->funcs->disable(dev_priv, pll);
317 	pll->on = false;
318 
319 out:
320 	mutex_unlock(&dev_priv->display.dpll.lock);
321 }
322 
/*
 * Find a DPLL in @dpll_mask usable with @pll_state: prefer a PLL that is
 * already in use with an identical programmed hw state (so the pipes can
 * share it), otherwise fall back to the first unused PLL. Returns NULL if
 * neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->display.dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* An exact hw state match means the PLL can be shared. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
371 
372 static void
373 intel_reference_shared_dpll(struct intel_atomic_state *state,
374 			    const struct intel_crtc *crtc,
375 			    const struct intel_shared_dpll *pll,
376 			    const struct intel_dpll_hw_state *pll_state)
377 {
378 	struct drm_i915_private *i915 = to_i915(state->base.dev);
379 	struct intel_shared_dpll_state *shared_dpll;
380 	const enum intel_dpll_id id = pll->info->id;
381 
382 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
383 
384 	if (shared_dpll[id].pipe_mask == 0)
385 		shared_dpll[id].hw_state = *pll_state;
386 
387 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
388 		pipe_name(crtc->pipe));
389 
390 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
391 }
392 
393 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
394 					  const struct intel_crtc *crtc,
395 					  const struct intel_shared_dpll *pll)
396 {
397 	struct intel_shared_dpll_state *shared_dpll;
398 
399 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
400 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
401 }
402 
403 static void intel_put_dpll(struct intel_atomic_state *state,
404 			   struct intel_crtc *crtc)
405 {
406 	const struct intel_crtc_state *old_crtc_state =
407 		intel_atomic_get_old_crtc_state(state, crtc);
408 	struct intel_crtc_state *new_crtc_state =
409 		intel_atomic_get_new_crtc_state(state, crtc);
410 
411 	new_crtc_state->shared_dpll = NULL;
412 
413 	if (!old_crtc_state->shared_dpll)
414 		return;
415 
416 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
417 }
418 
419 /**
420  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
421  * @state: atomic state
422  *
423  * This is the dpll version of drm_atomic_helper_swap_state() since the
424  * helper does not handle driver-specific global state.
425  *
426  * For consistency with atomic helpers this function does a complete swap,
427  * i.e. it also puts the current state into @state, even though there is no
428  * need for that at this moment.
429  */
430 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
431 {
432 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
433 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
434 	enum intel_dpll_id i;
435 
436 	if (!state->dpll_set)
437 		return;
438 
439 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
440 		struct intel_shared_dpll *pll =
441 			&dev_priv->display.dpll.shared_dplls[i];
442 
443 		swap(pll->state, shared_dpll[i]);
444 	}
445 }
446 
447 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
448 				      struct intel_shared_dpll *pll,
449 				      struct intel_dpll_hw_state *hw_state)
450 {
451 	const enum intel_dpll_id id = pll->info->id;
452 	intel_wakeref_t wakeref;
453 	u32 val;
454 
455 	wakeref = intel_display_power_get_if_enabled(dev_priv,
456 						     POWER_DOMAIN_DISPLAY_CORE);
457 	if (!wakeref)
458 		return false;
459 
460 	val = intel_de_read(dev_priv, PCH_DPLL(id));
461 	hw_state->dpll = val;
462 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
463 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
464 
465 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
466 
467 	return val & DPLL_VCO_ENABLE;
468 }
469 
470 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
471 {
472 	u32 val;
473 	bool enabled;
474 
475 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
476 
477 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
478 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
479 			    DREF_SUPERSPREAD_SOURCE_MASK));
480 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
481 }
482 
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Write the FP0/FP1 state before enabling the DPLL itself. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
509 
510 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
511 				 struct intel_shared_dpll *pll)
512 {
513 	const enum intel_dpll_id id = pll->info->id;
514 
515 	intel_de_write(dev_priv, PCH_DPLL(id), 0);
516 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
517 	udelay(200);
518 }
519 
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	/*
	 * No DPLL state to compute here; the dpll_hw_state consumed by
	 * ibx_get_dpll() is presumably prepared elsewhere in the mode set
	 * path — TODO(review): confirm. Always succeeds.
	 */
	return 0;
}
526 
/*
 * Reserve a PCH DPLL for @crtc: IBX has a fixed per-pipe PLL mapping,
 * otherwise any PLL from the pool with matching state (or a free one) may
 * be used. Returns 0 on success, -EINVAL if no PLL is available.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->display.dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
564 
565 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
566 			      const struct intel_dpll_hw_state *hw_state)
567 {
568 	drm_dbg_kms(&dev_priv->drm,
569 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
570 		    "fp0: 0x%x, fp1: 0x%x\n",
571 		    hw_state->dpll,
572 		    hw_state->dpll_md,
573 		    hw_state->fp0,
574 		    hw_state->fp1);
575 }
576 
/* Hooks for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs; the table is zero-terminated. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH (IBX/CPT) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
596 
597 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
598 				 struct intel_shared_dpll *pll)
599 {
600 	const enum intel_dpll_id id = pll->info->id;
601 
602 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
603 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
604 	udelay(20);
605 }
606 
607 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
608 				struct intel_shared_dpll *pll)
609 {
610 	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
611 	intel_de_posting_read(dev_priv, SPLL_CTL);
612 	udelay(20);
613 }
614 
615 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
616 				  struct intel_shared_dpll *pll)
617 {
618 	const enum intel_dpll_id id = pll->info->id;
619 	u32 val;
620 
621 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
622 	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
623 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
624 
625 	/*
626 	 * Try to set up the PCH reference clock once all DPLLs
627 	 * that depend on it have been shut down.
628 	 */
629 	if (dev_priv->pch_ssc_use & BIT(id))
630 		intel_init_pch_refclk(dev_priv);
631 }
632 
633 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
634 				 struct intel_shared_dpll *pll)
635 {
636 	enum intel_dpll_id id = pll->info->id;
637 	u32 val;
638 
639 	val = intel_de_read(dev_priv, SPLL_CTL);
640 	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
641 	intel_de_posting_read(dev_priv, SPLL_CTL);
642 
643 	/*
644 	 * Try to set up the PCH reference clock once all DPLLs
645 	 * that depend on it have been shut down.
646 	 */
647 	if (dev_priv->pch_ssc_use & BIT(id))
648 		intel_init_pch_refclk(dev_priv);
649 }
650 
651 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
652 				       struct intel_shared_dpll *pll,
653 				       struct intel_dpll_hw_state *hw_state)
654 {
655 	const enum intel_dpll_id id = pll->info->id;
656 	intel_wakeref_t wakeref;
657 	u32 val;
658 
659 	wakeref = intel_display_power_get_if_enabled(dev_priv,
660 						     POWER_DOMAIN_DISPLAY_CORE);
661 	if (!wakeref)
662 		return false;
663 
664 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
665 	hw_state->wrpll = val;
666 
667 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
668 
669 	return val & WRPLL_PLL_ENABLE;
670 }
671 
672 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
673 				      struct intel_shared_dpll *pll,
674 				      struct intel_dpll_hw_state *hw_state)
675 {
676 	intel_wakeref_t wakeref;
677 	u32 val;
678 
679 	wakeref = intel_display_power_get_if_enabled(dev_priv,
680 						     POWER_DOMAIN_DISPLAY_CORE);
681 	if (!wakeref)
682 		return false;
683 
684 	val = intel_de_read(dev_priv, SPLL_CTL);
685 	hw_state->spll = val;
686 
687 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
688 
689 	return val & SPLL_PLL_ENABLE;
690 }
691 
/* LC PLL frequency in MHz; LC_FREQ_2K is the fixed-point unit used below */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range: even values from 2 to 64 */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* A candidate (P, N2, R2) divider triple for the HSW WR PLL */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
708 
/* Return true if @clock appears in @list (array of @n frequencies, in Hz). */
static bool hsw_wrpll_freq_in_list(int clock, const int *list, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (list[i] == clock)
			return true;
	}

	return false;
}

/*
 * Return the PPM budget used when matching the WR PLL output to @clock
 * (in Hz). Well-known display clocks have tailored budgets; anything else
 * gets the default of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000[] = {
		267250000, 268500000,
	};

	if (hsw_wrpll_freq_in_list(clock, budget_0,
				   sizeof(budget_0) / sizeof(budget_0[0])))
		return 0;
	if (hsw_wrpll_freq_in_list(clock, budget_1500,
				   sizeof(budget_1500) / sizeof(budget_1500[0])))
		return 1500;
	if (hsw_wrpll_freq_in_list(clock, budget_2000,
				   sizeof(budget_2000) / sizeof(budget_2000[0])))
		return 2000;
	if (hsw_wrpll_freq_in_list(clock, budget_4000,
				   sizeof(budget_4000) / sizeof(budget_4000[0])))
		return 4000;
	if (hsw_wrpll_freq_in_list(clock, budget_5000,
				   sizeof(budget_5000) / sizeof(budget_5000[0])))
		return 5000;

	return 1000;
}
782 
/*
 * Consider divider candidate (r2, n2, p) for target frequency @freq2k and
 * update @best if it is preferable. Within the PPM @budget the candidate
 * maximizing N / (P * R^2) wins; outside the budget the closest match wins.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
842 
/*
 * Compute WR PLL dividers for @clock (in Hz) by exhaustive search over the
 * legal (r2, n2, p) space, keeping the best candidate according to
 * hsw_wrpll_update_rnp(). The outputs are the doubled dividers R2, N2 and
 * the post divider P.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
907 
/*
 * Calculate the WR PLL output frequency (in kHz) from the programmed
 * WRPLL_CTL value in @pll_state, based on the selected reference clock.
 * Returns 0 for an unknown reference selection.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the control register value. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
947 
/*
 * Compute the WRPLL_CTL value for @crtc's port clock and store it in the
 * CRTC state. Note that crtc_state->port_clock is rounded to the rate the
 * PLL will actually produce with the chosen dividers.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	/* hsw_ddi_calculate_wrpll() takes the clock in Hz; port_clock is kHz. */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
969 
970 static struct intel_shared_dpll *
971 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
972 		       struct intel_crtc *crtc)
973 {
974 	struct intel_crtc_state *crtc_state =
975 		intel_atomic_get_new_crtc_state(state, crtc);
976 
977 	return intel_find_shared_dpll(state, crtc,
978 				      &crtc_state->dpll_hw_state,
979 				      BIT(DPLL_ID_WRPLL2) |
980 				      BIT(DPLL_ID_WRPLL1));
981 }
982 
983 static int
984 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
985 {
986 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
987 	int clock = crtc_state->port_clock;
988 
989 	switch (clock / 2) {
990 	case 81000:
991 	case 135000:
992 	case 270000:
993 		return 0;
994 	default:
995 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
996 			    clock);
997 		return -EINVAL;
998 	}
999 }
1000 
1001 static struct intel_shared_dpll *
1002 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
1003 {
1004 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1005 	struct intel_shared_dpll *pll;
1006 	enum intel_dpll_id pll_id;
1007 	int clock = crtc_state->port_clock;
1008 
1009 	switch (clock / 2) {
1010 	case 81000:
1011 		pll_id = DPLL_ID_LCPLL_810;
1012 		break;
1013 	case 135000:
1014 		pll_id = DPLL_ID_LCPLL_1350;
1015 		break;
1016 	case 270000:
1017 		pll_id = DPLL_ID_LCPLL_2700;
1018 		break;
1019 	default:
1020 		MISSING_CASE(clock / 2);
1021 		return NULL;
1022 	}
1023 
1024 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1025 
1026 	if (!pll)
1027 		return NULL;
1028 
1029 	return pll;
1030 }
1031 
1032 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1033 				  const struct intel_shared_dpll *pll,
1034 				  const struct intel_dpll_hw_state *pll_state)
1035 {
1036 	int link_clock = 0;
1037 
1038 	switch (pll->info->id) {
1039 	case DPLL_ID_LCPLL_810:
1040 		link_clock = 81000;
1041 		break;
1042 	case DPLL_ID_LCPLL_1350:
1043 		link_clock = 135000;
1044 		break;
1045 	case DPLL_ID_LCPLL_2700:
1046 		link_clock = 270000;
1047 		break;
1048 	default:
1049 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1050 		break;
1051 	}
1052 
1053 	return link_clock * 2;
1054 }
1055 
1056 static int
1057 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1058 			  struct intel_crtc *crtc)
1059 {
1060 	struct intel_crtc_state *crtc_state =
1061 		intel_atomic_get_new_crtc_state(state, crtc);
1062 
1063 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1064 		return -EINVAL;
1065 
1066 	crtc_state->dpll_hw_state.spll =
1067 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1068 
1069 	return 0;
1070 }
1071 
1072 static struct intel_shared_dpll *
1073 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1074 		      struct intel_crtc *crtc)
1075 {
1076 	struct intel_crtc_state *crtc_state =
1077 		intel_atomic_get_new_crtc_state(state, crtc);
1078 
1079 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1080 				      BIT(DPLL_ID_SPLL));
1081 }
1082 
1083 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1084 				 const struct intel_shared_dpll *pll,
1085 				 const struct intel_dpll_hw_state *pll_state)
1086 {
1087 	int link_clock = 0;
1088 
1089 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1090 	case SPLL_FREQ_810MHz:
1091 		link_clock = 81000;
1092 		break;
1093 	case SPLL_FREQ_1350MHz:
1094 		link_clock = 135000;
1095 		break;
1096 	case SPLL_FREQ_2700MHz:
1097 		link_clock = 270000;
1098 		break;
1099 	default:
1100 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1101 		break;
1102 	}
1103 
1104 	return link_clock * 2;
1105 }
1106 
1107 static int hsw_compute_dpll(struct intel_atomic_state *state,
1108 			    struct intel_crtc *crtc,
1109 			    struct intel_encoder *encoder)
1110 {
1111 	struct intel_crtc_state *crtc_state =
1112 		intel_atomic_get_new_crtc_state(state, crtc);
1113 
1114 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1115 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1116 	else if (intel_crtc_has_dp_encoder(crtc_state))
1117 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1118 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1119 		return hsw_ddi_spll_compute_dpll(state, crtc);
1120 	else
1121 		return -EINVAL;
1122 }
1123 
1124 static int hsw_get_dpll(struct intel_atomic_state *state,
1125 			struct intel_crtc *crtc,
1126 			struct intel_encoder *encoder)
1127 {
1128 	struct intel_crtc_state *crtc_state =
1129 		intel_atomic_get_new_crtc_state(state, crtc);
1130 	struct intel_shared_dpll *pll = NULL;
1131 
1132 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1133 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1134 	else if (intel_crtc_has_dp_encoder(crtc_state))
1135 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1136 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1137 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1138 
1139 	if (!pll)
1140 		return -EINVAL;
1141 
1142 	intel_reference_shared_dpll(state, crtc,
1143 				    pll, &crtc_state->dpll_hw_state);
1144 
1145 	crtc_state->shared_dpll = pll;
1146 
1147 	return 0;
1148 }
1149 
/*
 * Cache the HSW PLL reference clocks. The non-SSC reference depends on
 * the HSW_REF_CLK_SELECT strap in FUSE_STRAP3.
 */
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 135000;
	/* Non-SSC is only used on non-ULT HSW. */
	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
		i915->display.dpll.ref_clks.nssc = 24000;
	else
		i915->display.dpll.ref_clks.nssc = 135000;
}
1159 
1160 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1161 			      const struct intel_dpll_hw_state *hw_state)
1162 {
1163 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1164 		    hw_state->wrpll, hw_state->spll);
1165 }
1166 
/* PLL vfuncs for the two HSW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1173 
/* PLL vfuncs for the HSW SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1180 
/* The LCPLLs are registered as INTEL_DPLL_ALWAYS_ON, so enable is a no-op. */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1185 
/* The LCPLLs are registered as INTEL_DPLL_ALWAYS_ON, so disable is a no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1190 
/* Always report the always-on LCPLLs as enabled; hw_state is left untouched. */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1197 
/* PLL vfuncs for the always-on HSW LCPLLs (enable/disable are no-ops). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1204 
/* All PLLs available on HSW/BDW; the table is terminated by an empty entry. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1214 
/* Platform-level DPLL manager hooks for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1223 
/* Per-PLL control and configuration registers for the SKL DPLLs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1227 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1254 
/*
 * Update this PLL's fields in the shared DPLL_CTRL1 register from the
 * cached hw state. Each PLL owns a 6-bit field at bit position id * 6,
 * so only this PLL's bits are cleared and rewritten.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1271 
/*
 * Enable a SKL DPLL (1-3): program DPLL_CTRL1 and the per-PLL
 * CFGCR1/CFGCR2 from the cached hw state, set the enable bit and wait
 * for the PLL to report lock in DPLL_STATUS.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1292 
/*
 * DPLL0 is already running (it drives CDCLK, see skl_ddi_dpll0_get_hw_state),
 * so "enabling" it only updates its DPLL_CTRL1 fields.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1298 
/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1310 
/* DPLL0 drives CDCLK and must never be turned off; disable is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1315 
/*
 * Read back the current hw state of a SKL DPLL (1-3).
 * Returns false if display power is off or the PLL is disabled; on
 * success fills @hw_state with this PLL's DPLL_CTRL1 field and, in HDMI
 * mode only, the CFGCR1/CFGCR2 dividers.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* each PLL owns a 6-bit field in DPLL_CTRL1 at bit id * 6 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1352 
/*
 * Read back the hw state of DPLL0. Unlike the other DPLLs it must
 * always be enabled (it drives CDCLK), so a disabled PLL triggers a
 * WARN. Only the DPLL_CTRL1 field is captured; DPLL0 has no HDMI mode.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1385 
/* Best WRPLL divider candidate found so far during the search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1392 
/*
 * DCO freq must be within +1%/-6% of the DCO central freq. The limits
 * are in units of 0.01% (matching the 10000 scale factor used in
 * skl_wrpll_try_divider()): 100 -> +1%, 600 -> -6%.
 */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1396 
1397 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1398 				  u64 central_freq,
1399 				  u64 dco_freq,
1400 				  unsigned int divider)
1401 {
1402 	u64 deviation;
1403 
1404 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1405 			      central_freq);
1406 
1407 	/* positive deviation */
1408 	if (dco_freq >= central_freq) {
1409 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1410 		    deviation < ctx->min_deviation) {
1411 			ctx->min_deviation = deviation;
1412 			ctx->central_freq = central_freq;
1413 			ctx->dco_freq = dco_freq;
1414 			ctx->p = divider;
1415 		}
1416 	/* negative deviation */
1417 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1418 		   deviation < ctx->min_deviation) {
1419 		ctx->min_deviation = deviation;
1420 		ctx->central_freq = central_freq;
1421 		ctx->dco_freq = dco_freq;
1422 		ctx->p = divider;
1423 	}
1424 }
1425 
/*
 * Decompose the total divider @p into the hardware P0 (pdiv),
 * P1 (qdiv) and P2 (kdiv) multipliers such that p = p0 * p1 * p2.
 * @p must come from the even/odd divider tables in
 * skl_ddi_calculate_wrpll(); other values leave the outputs untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers: split off a factor of two first */
		unsigned int half = p / 2;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1474 
/* WRPLL divider encoding as written to DPLL_CFGCR1/DPLL_CFGCR2. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* fractional part of the DCO multiplier */
	u32 dco_integer;	/* integer part of the DCO multiplier */
	u32 qdiv_ratio;		/* Q divider ratio (P1) */
	u32 qdiv_mode;		/* 1 when the Q divider is used (ratio != 1) */
	u32 kdiv;		/* encoded K divider (P2) */
	u32 pdiv;		/* encoded P divider (P0) */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1484 
/*
 * Translate the chosen WRPLL dividers into the register field encodings
 * of struct skl_wrpll_params. @central_freq comes from the fixed table
 * in skl_ddi_calculate_wrpll(), so the switch needs no default case;
 * p0/p2 values outside the sets produced by skl_wrpll_get_multipliers()
 * trigger a WARN.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	/* DCO frequency is the AFE clock multiplied by the total divider */
	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1552 
/*
 * Find WRPLL dividers for @clock (in Hz): try every candidate divider
 * against the three DCO central frequencies, keeping the combination
 * with the smallest DCO deviation. Even dividers are tried first and
 * preferred over odd ones.
 *
 * Returns 0 on success with @wrpll_params filled in, -EINVAL if no
 * divider yields an acceptable DCO frequency.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1624 
/*
 * Decode the WRPLL dividers from CFGCR1/CFGCR2 back into a port clock
 * (in kHz). Returns 0 on an unrecognized or zero divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* the Q divider ratio is only meaningful when Q divider mode is set */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/0x8000) * ref clock */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* invert the 5x AFE clock relationship used when computing dividers */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1695 
/*
 * Compute the SKL WRPLL (HDMI mode) hw state for the requested port
 * clock and store it in crtc_state->dpll_hw_state. The port clock is
 * then recomputed from the encoded dividers so it reflects what the
 * hardware will actually produce.
 *
 * Returns 0 on success, or the error from skl_ddi_calculate_wrpll() if
 * no usable divider was found.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, skl_ddi_calculate_wrpll() wants Hz */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1735 
/*
 * Encode the DP/eDP link rate into the ctrl1 part of the DPLL hw
 * state. Unlisted port clocks leave only the OVERRIDE bit set.
 * Always returns 0.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1772 
1773 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1774 				  const struct intel_shared_dpll *pll,
1775 				  const struct intel_dpll_hw_state *pll_state)
1776 {
1777 	int link_clock = 0;
1778 
1779 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1780 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1781 	case DPLL_CTRL1_LINK_RATE_810:
1782 		link_clock = 81000;
1783 		break;
1784 	case DPLL_CTRL1_LINK_RATE_1080:
1785 		link_clock = 108000;
1786 		break;
1787 	case DPLL_CTRL1_LINK_RATE_1350:
1788 		link_clock = 135000;
1789 		break;
1790 	case DPLL_CTRL1_LINK_RATE_1620:
1791 		link_clock = 162000;
1792 		break;
1793 	case DPLL_CTRL1_LINK_RATE_2160:
1794 		link_clock = 216000;
1795 		break;
1796 	case DPLL_CTRL1_LINK_RATE_2700:
1797 		link_clock = 270000;
1798 		break;
1799 	default:
1800 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1801 		break;
1802 	}
1803 
1804 	return link_clock * 2;
1805 }
1806 
1807 static int skl_compute_dpll(struct intel_atomic_state *state,
1808 			    struct intel_crtc *crtc,
1809 			    struct intel_encoder *encoder)
1810 {
1811 	struct intel_crtc_state *crtc_state =
1812 		intel_atomic_get_new_crtc_state(state, crtc);
1813 
1814 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1815 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1816 	else if (intel_crtc_has_dp_encoder(crtc_state))
1817 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1818 	else
1819 		return -EINVAL;
1820 }
1821 
1822 static int skl_get_dpll(struct intel_atomic_state *state,
1823 			struct intel_crtc *crtc,
1824 			struct intel_encoder *encoder)
1825 {
1826 	struct intel_crtc_state *crtc_state =
1827 		intel_atomic_get_new_crtc_state(state, crtc);
1828 	struct intel_shared_dpll *pll;
1829 
1830 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1831 		pll = intel_find_shared_dpll(state, crtc,
1832 					     &crtc_state->dpll_hw_state,
1833 					     BIT(DPLL_ID_SKL_DPLL0));
1834 	else
1835 		pll = intel_find_shared_dpll(state, crtc,
1836 					     &crtc_state->dpll_hw_state,
1837 					     BIT(DPLL_ID_SKL_DPLL3) |
1838 					     BIT(DPLL_ID_SKL_DPLL2) |
1839 					     BIT(DPLL_ID_SKL_DPLL1));
1840 	if (!pll)
1841 		return -EINVAL;
1842 
1843 	intel_reference_shared_dpll(state, crtc,
1844 				    pll, &crtc_state->dpll_hw_state);
1845 
1846 	crtc_state->shared_dpll = pll;
1847 
1848 	return 0;
1849 }
1850 
1851 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1852 				const struct intel_shared_dpll *pll,
1853 				const struct intel_dpll_hw_state *pll_state)
1854 {
1855 	/*
1856 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1857 	 * the internal shift for each field
1858 	 */
1859 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1860 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1861 	else
1862 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1863 }
1864 
/* SKL DPLLs use the CDCLK reference clock; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
1870 
/* Log the raw ctrl1/cfgcr1/cfgcr2 values of a SKL DPLL hw state. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1880 
/* PLL vfuncs for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1887 
/* PLL vfuncs for the always-on SKL DPLL0 (drives CDCLK). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1894 
/* All PLLs available on SKL; the table is terminated by an empty entry. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1902 
/* Platform-level DPLL manager hooks for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1911 
/*
 * Program and enable a BXT/GLK port PLL from the cached hw state:
 * select the non-SSC reference, (GLK only) power up the PLL, program
 * the dividers and loop coefficients, trigger recalibration, set the
 * enable bit and wait for lock, then program the lane staggering.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_INT_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	/* Write lock threshold */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	/* Write DCO amplitude */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2032 
/*
 * Disable a BXT/GLK port PLL: clear the enable bit, and on GLK also
 * power the PLL down and wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2055 
/*
 * Read the current hw state of a BXT/GLK port PLL into @hw_state.
 * Returns true if the PLL is enabled. Each register value is masked
 * down to only the fields this driver programs (see
 * bxt_ddi_set_dpll_hw_state()), so readout and computed state can be
 * compared directly.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2135 
/*
 * Pre-calculated PLL dividers for the standard DP link rates;
 * .dot is the link clock in kHz.
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2147 
2148 static int
2149 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2150 			  struct dpll *clk_div)
2151 {
2152 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2153 
2154 	/* Calculate HDMI div */
2155 	/*
2156 	 * FIXME: tie the following calculation into
2157 	 * i9xx_crtc_compute_clock
2158 	 */
2159 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2160 		return -EINVAL;
2161 
2162 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2163 
2164 	return 0;
2165 }
2166 
2167 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2168 				    struct dpll *clk_div)
2169 {
2170 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2171 	int i;
2172 
2173 	*clk_div = bxt_dp_clk_val[0];
2174 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2175 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2176 			*clk_div = bxt_dp_clk_val[i];
2177 			break;
2178 		}
2179 	}
2180 
2181 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2182 
2183 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2184 		    clk_div->dot != crtc_state->port_clock);
2185 }
2186 
2187 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2188 				     const struct dpll *clk_div)
2189 {
2190 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2191 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2192 	int clock = crtc_state->port_clock;
2193 	int vco = clk_div->vco;
2194 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2195 	u32 lanestagger;
2196 
2197 	if (vco >= 6200000 && vco <= 6700000) {
2198 		prop_coef = 4;
2199 		int_coef = 9;
2200 		gain_ctl = 3;
2201 		targ_cnt = 8;
2202 	} else if ((vco > 5400000 && vco < 6200000) ||
2203 			(vco >= 4800000 && vco < 5400000)) {
2204 		prop_coef = 5;
2205 		int_coef = 11;
2206 		gain_ctl = 3;
2207 		targ_cnt = 9;
2208 	} else if (vco == 5400000) {
2209 		prop_coef = 3;
2210 		int_coef = 8;
2211 		gain_ctl = 1;
2212 		targ_cnt = 9;
2213 	} else {
2214 		drm_err(&i915->drm, "Invalid VCO\n");
2215 		return -EINVAL;
2216 	}
2217 
2218 	if (clock > 270000)
2219 		lanestagger = 0x18;
2220 	else if (clock > 135000)
2221 		lanestagger = 0x0d;
2222 	else if (clock > 67000)
2223 		lanestagger = 0x07;
2224 	else if (clock > 33000)
2225 		lanestagger = 0x04;
2226 	else
2227 		lanestagger = 0x02;
2228 
2229 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2230 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2231 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2232 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2233 
2234 	if (clk_div->m2 & 0x3fffff)
2235 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2236 
2237 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2238 		PORT_PLL_INT_COEFF(int_coef) |
2239 		PORT_PLL_GAIN_CTL(gain_ctl);
2240 
2241 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2242 
2243 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2244 
2245 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2246 		PORT_PLL_DCO_AMP_OVR_EN_H;
2247 
2248 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2249 
2250 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2251 
2252 	return 0;
2253 }
2254 
2255 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2256 				const struct intel_shared_dpll *pll,
2257 				const struct intel_dpll_hw_state *pll_state)
2258 {
2259 	struct dpll clock;
2260 
2261 	clock.m1 = 2;
2262 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2263 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2264 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2265 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2266 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2267 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2268 
2269 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2270 }
2271 
2272 static int
2273 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2274 {
2275 	struct dpll clk_div = {};
2276 
2277 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2278 
2279 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2280 }
2281 
2282 static int
2283 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2284 {
2285 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2286 	struct dpll clk_div = {};
2287 	int ret;
2288 
2289 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2290 
2291 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2292 	if (ret)
2293 		return ret;
2294 
2295 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2296 						      &crtc_state->dpll_hw_state);
2297 
2298 	return 0;
2299 }
2300 
2301 static int bxt_compute_dpll(struct intel_atomic_state *state,
2302 			    struct intel_crtc *crtc,
2303 			    struct intel_encoder *encoder)
2304 {
2305 	struct intel_crtc_state *crtc_state =
2306 		intel_atomic_get_new_crtc_state(state, crtc);
2307 
2308 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2309 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2310 	else if (intel_crtc_has_dp_encoder(crtc_state))
2311 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2312 	else
2313 		return -EINVAL;
2314 }
2315 
2316 static int bxt_get_dpll(struct intel_atomic_state *state,
2317 			struct intel_crtc *crtc,
2318 			struct intel_encoder *encoder)
2319 {
2320 	struct intel_crtc_state *crtc_state =
2321 		intel_atomic_get_new_crtc_state(state, crtc);
2322 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2323 	struct intel_shared_dpll *pll;
2324 	enum intel_dpll_id id;
2325 
2326 	/* 1:1 mapping between ports and PLLs */
2327 	id = (enum intel_dpll_id) encoder->port;
2328 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2329 
2330 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2331 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2332 
2333 	intel_reference_shared_dpll(state, crtc,
2334 				    pll, &crtc_state->dpll_hw_state);
2335 
2336 	crtc_state->shared_dpll = pll;
2337 
2338 	return 0;
2339 }
2340 
2341 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2342 {
2343 	i915->display.dpll.ref_clks.ssc = 100000;
2344 	i915->display.dpll.ref_clks.nssc = 100000;
2345 	/* DSI non-SSC ref 19.2MHz */
2346 }
2347 
2348 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2349 			      const struct intel_dpll_hw_state *hw_state)
2350 {
2351 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2352 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2353 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2354 		    hw_state->ebb0,
2355 		    hw_state->ebb4,
2356 		    hw_state->pll0,
2357 		    hw_state->pll1,
2358 		    hw_state->pll2,
2359 		    hw_state->pll3,
2360 		    hw_state->pll6,
2361 		    hw_state->pll8,
2362 		    hw_state->pll9,
2363 		    hw_state->pll10,
2364 		    hw_state->pcsdw12);
2365 }
2366 
/* Hw state hooks shared by all three BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};

/* One PLL per port (A/B/C); IDs map 1:1 to ports (see bxt_get_dpll()). */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ }, /* sentinel */
};

/* Top-level DPLL manager for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2389 
/*
 * Decompose a DCO divider into the hardware's P/Q/K factors so that
 * bestdiv == pdiv * qdiv * kdiv. Only the divider values produced by
 * icl_calc_wrpll() are handled; for anything else the outputs are left
 * untouched (callers pre-initialize them to 0).
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2) {
		/* odd dividers: 3/5/7 directly, otherwise 9/15/21 = pdiv * 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2428 
2429 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2430 				      u32 dco_freq, u32 ref_freq,
2431 				      int pdiv, int qdiv, int kdiv)
2432 {
2433 	u32 dco;
2434 
2435 	switch (kdiv) {
2436 	case 1:
2437 		params->kdiv = 1;
2438 		break;
2439 	case 2:
2440 		params->kdiv = 2;
2441 		break;
2442 	case 3:
2443 		params->kdiv = 4;
2444 		break;
2445 	default:
2446 		WARN(1, "Incorrect KDiv\n");
2447 	}
2448 
2449 	switch (pdiv) {
2450 	case 2:
2451 		params->pdiv = 1;
2452 		break;
2453 	case 3:
2454 		params->pdiv = 2;
2455 		break;
2456 	case 5:
2457 		params->pdiv = 4;
2458 		break;
2459 	case 7:
2460 		params->pdiv = 8;
2461 		break;
2462 	default:
2463 		WARN(1, "Incorrect PDiv\n");
2464 	}
2465 
2466 	WARN_ON(kdiv != 2 && qdiv != 1);
2467 
2468 	params->qdiv_ratio = qdiv;
2469 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2470 
2471 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2472 
2473 	params->dco_integer = dco >> 15;
2474 	params->dco_fraction = dco & 0x7fff;
2475 }
2476 
2477 /*
2478  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2479  * Program half of the nominal DCO divider fraction value.
2480  */
2481 static bool
2482 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2483 {
2484 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2485 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2486 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2487 		 i915->display.dpll.ref_clks.nssc == 38400;
2488 }
2489 
/* Pairs a DP link rate with its pre-computed combo PHY WRPLL parameters. */
struct icl_combo_pll_params {
	int clock;	/* port clock in kHz */
	struct skl_wrpll_params wrpll;
};
2494 
2495 /*
2496  * These values alrea already adjusted: they're the bits we write to the
2497  * registers, not the logical values.
2498  */
/* Combo PHY DPLL parameters for DP link rates, 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2525 
2526 
/*
 * Combo PHY DPLL parameters for DP link rates, 19.2 MHz reference clock.
 * Also used for 38.4 MHz values (the DPLL divides a 38.4 MHz reference
 * by two, see icl_wrpll_ref_clock()).
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2554 
/* ICL TBT PLL parameters, 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* ICL TBT PLL parameters, 19.2 MHz (and 38.4 MHz) reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL TBT PLL parameters, 19.2 MHz (and 38.4 MHz) reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL TBT PLL parameters, 24 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2575 
2576 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2577 				 struct skl_wrpll_params *pll_params)
2578 {
2579 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2580 	const struct icl_combo_pll_params *params =
2581 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2582 		icl_dp_combo_pll_24MHz_values :
2583 		icl_dp_combo_pll_19_2MHz_values;
2584 	int clock = crtc_state->port_clock;
2585 	int i;
2586 
2587 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2588 		if (clock == params[i].clock) {
2589 			*pll_params = params[i].wrpll;
2590 			return 0;
2591 		}
2592 	}
2593 
2594 	MISSING_CASE(clock);
2595 	return -EINVAL;
2596 }
2597 
2598 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2599 			    struct skl_wrpll_params *pll_params)
2600 {
2601 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2602 
2603 	if (DISPLAY_VER(dev_priv) >= 12) {
2604 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2605 		default:
2606 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2607 			fallthrough;
2608 		case 19200:
2609 		case 38400:
2610 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2611 			break;
2612 		case 24000:
2613 			*pll_params = tgl_tbt_pll_24MHz_values;
2614 			break;
2615 		}
2616 	} else {
2617 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2618 		default:
2619 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2620 			fallthrough;
2621 		case 19200:
2622 		case 38400:
2623 			*pll_params = icl_tbt_pll_19_2MHz_values;
2624 			break;
2625 		case 24000:
2626 			*pll_params = icl_tbt_pll_24MHz_values;
2627 			break;
2628 		}
2629 	}
2630 
2631 	return 0;
2632 }
2633 
2634 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2635 				    const struct intel_shared_dpll *pll,
2636 				    const struct intel_dpll_hw_state *pll_state)
2637 {
2638 	/*
2639 	 * The PLL outputs multiple frequencies at the same time, selection is
2640 	 * made at DDI clock mux level.
2641 	 */
2642 	drm_WARN_ON(&i915->drm, 1);
2643 
2644 	return 0;
2645 }
2646 
2647 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2648 {
2649 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2650 
2651 	/*
2652 	 * For ICL+, the spec states: if reference frequency is 38.4,
2653 	 * use 19.2 because the DPLL automatically divides that by 2.
2654 	 */
2655 	if (ref_clock == 38400)
2656 		ref_clock = 19200;
2657 
2658 	return ref_clock;
2659 }
2660 
2661 static int
2662 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2663 	       struct skl_wrpll_params *wrpll_params)
2664 {
2665 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2666 	int ref_clock = icl_wrpll_ref_clock(i915);
2667 	u32 afe_clock = crtc_state->port_clock * 5;
2668 	u32 dco_min = 7998000;
2669 	u32 dco_max = 10000000;
2670 	u32 dco_mid = (dco_min + dco_max) / 2;
2671 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2672 					 18, 20, 24, 28, 30, 32,  36,  40,
2673 					 42, 44, 48, 50, 52, 54,  56,  60,
2674 					 64, 66, 68, 70, 72, 76,  78,  80,
2675 					 84, 88, 90, 92, 96, 98, 100, 102,
2676 					  3,  5,  7,  9, 15, 21 };
2677 	u32 dco, best_dco = 0, dco_centrality = 0;
2678 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2679 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2680 
2681 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2682 		dco = afe_clock * dividers[d];
2683 
2684 		if (dco <= dco_max && dco >= dco_min) {
2685 			dco_centrality = abs(dco - dco_mid);
2686 
2687 			if (dco_centrality < best_dco_centrality) {
2688 				best_dco_centrality = dco_centrality;
2689 				best_div = dividers[d];
2690 				best_dco = dco;
2691 			}
2692 		}
2693 	}
2694 
2695 	if (best_div == 0)
2696 		return -EINVAL;
2697 
2698 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2699 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2700 				  pdiv, qdiv, kdiv);
2701 
2702 	return 0;
2703 }
2704 
/*
 * Derive the port clock (kHz) from a combo PHY DPLL's cfgcr0/cfgcr1
 * state: inverse of icl_calc_dpll_state()/icl_calc_wrpll(). The DCO
 * frequency is divided by p0 * p1 * p2 and by 5 (AFE clock to port
 * clock, see icl_calc_wrpll()).
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	/* Raw register fields; decoded into divider values below. */
	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv only divides when qdiv mode is enabled. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/*
	 * Display WA #22010492432: the register holds half of the nominal
	 * fraction, so double it here (see icl_calc_dpll_state()).
	 */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* dco_fraction is a 15-bit binary fraction of the ref clock. */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* A zero divider would mean an unprogrammed/invalid hw state. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2765 
2766 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2767 				const struct skl_wrpll_params *pll_params,
2768 				struct intel_dpll_hw_state *pll_state)
2769 {
2770 	u32 dco_fraction = pll_params->dco_fraction;
2771 
2772 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2773 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2774 
2775 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2776 			    pll_params->dco_integer;
2777 
2778 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2779 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2780 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2781 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2782 
2783 	if (DISPLAY_VER(i915) >= 12)
2784 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2785 	else
2786 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2787 
2788 	if (i915->display.vbt.override_afc_startup)
2789 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2790 }
2791 
/*
 * Find a div1/div2 pair for the MG/DKL PHY PLL such that
 * div1 * div2 * clock_khz * 5 falls inside the allowed DCO range, store
 * that DCO frequency in @target_dco_khz and encode the dividers into the
 * clktop2 fields of @state. Returns 0 on success, -EINVAL if no divider
 * pair fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	/* div1 candidates, tried largest first */
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP requires an exact 8.1 GHz DCO; otherwise a range is allowed. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Translate div1 into its register field encoding. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2867 
2868 /*
2869  * The specification for this function uses real numbers, so the math had to be
2870  * adapted to integer-only calculation, that's why it looks so different.
2871  */
2872 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2873 				 struct intel_dpll_hw_state *pll_state)
2874 {
2875 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2876 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2877 	int clock = crtc_state->port_clock;
2878 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2879 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2880 	u32 prop_coeff, int_coeff;
2881 	u32 tdc_targetcnt, feedfwgain;
2882 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2883 	u64 tmp;
2884 	bool use_ssc = false;
2885 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2886 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2887 	int ret;
2888 
2889 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2890 				       pll_state, is_dkl);
2891 	if (ret)
2892 		return ret;
2893 
2894 	m1div = 2;
2895 	m2div_int = dco_khz / (refclk_khz * m1div);
2896 	if (m2div_int > 255) {
2897 		if (!is_dkl) {
2898 			m1div = 4;
2899 			m2div_int = dco_khz / (refclk_khz * m1div);
2900 		}
2901 
2902 		if (m2div_int > 255)
2903 			return -EINVAL;
2904 	}
2905 	m2div_rem = dco_khz % (refclk_khz * m1div);
2906 
2907 	tmp = (u64)m2div_rem * (1 << 22);
2908 	do_div(tmp, refclk_khz * m1div);
2909 	m2div_frac = tmp;
2910 
2911 	switch (refclk_khz) {
2912 	case 19200:
2913 		iref_ndiv = 1;
2914 		iref_trim = 28;
2915 		iref_pulse_w = 1;
2916 		break;
2917 	case 24000:
2918 		iref_ndiv = 1;
2919 		iref_trim = 25;
2920 		iref_pulse_w = 2;
2921 		break;
2922 	case 38400:
2923 		iref_ndiv = 2;
2924 		iref_trim = 28;
2925 		iref_pulse_w = 1;
2926 		break;
2927 	default:
2928 		MISSING_CASE(refclk_khz);
2929 		return -EINVAL;
2930 	}
2931 
2932 	/*
2933 	 * tdc_res = 0.000003
2934 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2935 	 *
2936 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2937 	 * was supposed to be a division, but we rearranged the operations of
2938 	 * the formula to avoid early divisions so we don't multiply the
2939 	 * rounding errors.
2940 	 *
2941 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2942 	 * we also rearrange to work with integers.
2943 	 *
2944 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2945 	 * last division by 10.
2946 	 */
2947 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2948 
2949 	/*
2950 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2951 	 * 32 bits. That's not a problem since we round the division down
2952 	 * anyway.
2953 	 */
2954 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2955 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2956 
2957 	if (dco_khz >= 9000000) {
2958 		prop_coeff = 5;
2959 		int_coeff = 10;
2960 	} else {
2961 		prop_coeff = 4;
2962 		int_coeff = 8;
2963 	}
2964 
2965 	if (use_ssc) {
2966 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2967 		do_div(tmp, refclk_khz * m1div * 10000);
2968 		ssc_stepsize = tmp;
2969 
2970 		tmp = mul_u32_u32(dco_khz, 1000);
2971 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2972 	} else {
2973 		ssc_stepsize = 0;
2974 		ssc_steplen = 0;
2975 	}
2976 	ssc_steplog = 4;
2977 
2978 	/* write pll_state calculations */
2979 	if (is_dkl) {
2980 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2981 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2982 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2983 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2984 		if (dev_priv->display.vbt.override_afc_startup) {
2985 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2986 
2987 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2988 		}
2989 
2990 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2991 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2992 
2993 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2994 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2995 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2996 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2997 
2998 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2999 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3000 
3001 		pll_state->mg_pll_tdc_coldst_bias =
3002 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3003 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3004 
3005 	} else {
3006 		pll_state->mg_pll_div0 =
3007 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3008 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3009 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3010 
3011 		pll_state->mg_pll_div1 =
3012 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3013 			MG_PLL_DIV1_DITHER_DIV_2 |
3014 			MG_PLL_DIV1_NDIVRATIO(1) |
3015 			MG_PLL_DIV1_FBPREDIV(m1div);
3016 
3017 		pll_state->mg_pll_lf =
3018 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3019 			MG_PLL_LF_AFCCNTSEL_512 |
3020 			MG_PLL_LF_GAINCTRL(1) |
3021 			MG_PLL_LF_INT_COEFF(int_coeff) |
3022 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3023 
3024 		pll_state->mg_pll_frac_lock =
3025 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3026 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3027 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3028 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3029 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3030 		if (use_ssc || m2div_rem > 0)
3031 			pll_state->mg_pll_frac_lock |=
3032 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3033 
3034 		pll_state->mg_pll_ssc =
3035 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3036 			MG_PLL_SSC_TYPE(2) |
3037 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3038 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3039 			MG_PLL_SSC_FLLEN |
3040 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3041 
3042 		pll_state->mg_pll_tdc_coldst_bias =
3043 			MG_PLL_TDC_COLDST_COLDSTART |
3044 			MG_PLL_TDC_COLDST_IREFINT_EN |
3045 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3046 			MG_PLL_TDC_TDCOVCCORR_EN |
3047 			MG_PLL_TDC_TDCSEL(3);
3048 
3049 		pll_state->mg_pll_bias =
3050 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3051 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3052 			MG_PLL_BIAS_BIAS_BONUS(10) |
3053 			MG_PLL_BIAS_BIASCAL_EN |
3054 			MG_PLL_BIAS_CTRIM(12) |
3055 			MG_PLL_BIAS_VREF_RDAC(4) |
3056 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3057 
3058 		if (refclk_khz == 38400) {
3059 			pll_state->mg_pll_tdc_coldst_bias_mask =
3060 				MG_PLL_TDC_COLDST_COLDSTART;
3061 			pll_state->mg_pll_bias_mask = 0;
3062 		} else {
3063 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3064 			pll_state->mg_pll_bias_mask = -1U;
3065 		}
3066 
3067 		pll_state->mg_pll_tdc_coldst_bias &=
3068 			pll_state->mg_pll_tdc_coldst_bias_mask;
3069 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3070 	}
3071 
3072 	return 0;
3073 }
3074 
/*
 * Calculate the output clock of an MG (ICL) or DKL (display ver 12+)
 * Type-C PHY PLL from its register state in @pll_state.
 *
 * Returns the computed clock, or 0 if the HSDIV ratio field holds an
 * unexpected value.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	/* MG/DKL PLLs are driven by the non-SSC reference clock. */
	ref_clock = dev_priv->display.dpll.ref_clks.nssc;

	if (DISPLAY_VER(dev_priv) >= 12) {
		/* DKL PHY: feedback dividers live in PLL_DIV0 / PLL_BIAS. */
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* The fractional part only counts when explicitly enabled. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		/* MG PHY (ICL): feedback dividers live in PLL_DIV0 / PLL_DIV1. */
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the HSDIV post-divider ratio. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3146 
3147 /**
3148  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3149  * @crtc_state: state for the CRTC to select the DPLL for
3150  * @port_dpll_id: the active @port_dpll_id to select
3151  *
3152  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3153  * CRTC.
3154  */
3155 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3156 			      enum icl_port_dpll_id port_dpll_id)
3157 {
3158 	struct icl_port_dpll *port_dpll =
3159 		&crtc_state->icl_port_dplls[port_dpll_id];
3160 
3161 	crtc_state->shared_dpll = port_dpll->pll;
3162 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3163 }
3164 
3165 static void icl_update_active_dpll(struct intel_atomic_state *state,
3166 				   struct intel_crtc *crtc,
3167 				   struct intel_encoder *encoder)
3168 {
3169 	struct intel_crtc_state *crtc_state =
3170 		intel_atomic_get_new_crtc_state(state, crtc);
3171 	struct intel_digital_port *primary_port;
3172 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3173 
3174 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3175 		enc_to_mst(encoder)->primary :
3176 		enc_to_dig_port(encoder);
3177 
3178 	if (primary_port &&
3179 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3180 	     intel_tc_port_in_legacy_mode(primary_port)))
3181 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3182 
3183 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3184 }
3185 
3186 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3187 {
3188 	if (!(i915->hti_state & HDPORT_ENABLED))
3189 		return 0;
3190 
3191 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3192 }
3193 
3194 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3195 				      struct intel_crtc *crtc)
3196 {
3197 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3198 	struct intel_crtc_state *crtc_state =
3199 		intel_atomic_get_new_crtc_state(state, crtc);
3200 	struct icl_port_dpll *port_dpll =
3201 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3202 	struct skl_wrpll_params pll_params = {};
3203 	int ret;
3204 
3205 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3206 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3207 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3208 	else
3209 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3210 
3211 	if (ret)
3212 		return ret;
3213 
3214 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3215 
3216 	/* this is mainly for the fastset check */
3217 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3218 
3219 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3220 							    &port_dpll->hw_state);
3221 
3222 	return 0;
3223 }
3224 
3225 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3226 				  struct intel_crtc *crtc,
3227 				  struct intel_encoder *encoder)
3228 {
3229 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3230 	struct intel_crtc_state *crtc_state =
3231 		intel_atomic_get_new_crtc_state(state, crtc);
3232 	struct icl_port_dpll *port_dpll =
3233 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3234 	enum port port = encoder->port;
3235 	unsigned long dpll_mask;
3236 
3237 	if (IS_ALDERLAKE_S(dev_priv)) {
3238 		dpll_mask =
3239 			BIT(DPLL_ID_DG1_DPLL3) |
3240 			BIT(DPLL_ID_DG1_DPLL2) |
3241 			BIT(DPLL_ID_ICL_DPLL1) |
3242 			BIT(DPLL_ID_ICL_DPLL0);
3243 	} else if (IS_DG1(dev_priv)) {
3244 		if (port == PORT_D || port == PORT_E) {
3245 			dpll_mask =
3246 				BIT(DPLL_ID_DG1_DPLL2) |
3247 				BIT(DPLL_ID_DG1_DPLL3);
3248 		} else {
3249 			dpll_mask =
3250 				BIT(DPLL_ID_DG1_DPLL0) |
3251 				BIT(DPLL_ID_DG1_DPLL1);
3252 		}
3253 	} else if (IS_ROCKETLAKE(dev_priv)) {
3254 		dpll_mask =
3255 			BIT(DPLL_ID_EHL_DPLL4) |
3256 			BIT(DPLL_ID_ICL_DPLL1) |
3257 			BIT(DPLL_ID_ICL_DPLL0);
3258 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3259 		dpll_mask =
3260 			BIT(DPLL_ID_EHL_DPLL4) |
3261 			BIT(DPLL_ID_ICL_DPLL1) |
3262 			BIT(DPLL_ID_ICL_DPLL0);
3263 	} else {
3264 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3265 	}
3266 
3267 	/* Eliminate DPLLs from consideration if reserved by HTI */
3268 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3269 
3270 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3271 						&port_dpll->hw_state,
3272 						dpll_mask);
3273 	if (!port_dpll->pll)
3274 		return -EINVAL;
3275 
3276 	intel_reference_shared_dpll(state, crtc,
3277 				    port_dpll->pll, &port_dpll->hw_state);
3278 
3279 	icl_update_active_dpll(state, crtc, encoder);
3280 
3281 	return 0;
3282 }
3283 
3284 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3285 				    struct intel_crtc *crtc)
3286 {
3287 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3288 	struct intel_crtc_state *crtc_state =
3289 		intel_atomic_get_new_crtc_state(state, crtc);
3290 	struct icl_port_dpll *port_dpll =
3291 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3292 	struct skl_wrpll_params pll_params = {};
3293 	int ret;
3294 
3295 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3296 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3297 	if (ret)
3298 		return ret;
3299 
3300 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3301 
3302 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3303 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3304 	if (ret)
3305 		return ret;
3306 
3307 	/* this is mainly for the fastset check */
3308 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3309 
3310 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3311 							 &port_dpll->hw_state);
3312 
3313 	return 0;
3314 }
3315 
3316 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3317 				struct intel_crtc *crtc,
3318 				struct intel_encoder *encoder)
3319 {
3320 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3321 	struct intel_crtc_state *crtc_state =
3322 		intel_atomic_get_new_crtc_state(state, crtc);
3323 	struct icl_port_dpll *port_dpll =
3324 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3325 	enum intel_dpll_id dpll_id;
3326 	int ret;
3327 
3328 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3329 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3330 						&port_dpll->hw_state,
3331 						BIT(DPLL_ID_ICL_TBTPLL));
3332 	if (!port_dpll->pll)
3333 		return -EINVAL;
3334 	intel_reference_shared_dpll(state, crtc,
3335 				    port_dpll->pll, &port_dpll->hw_state);
3336 
3337 
3338 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3339 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3340 							 encoder->port));
3341 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3342 						&port_dpll->hw_state,
3343 						BIT(dpll_id));
3344 	if (!port_dpll->pll) {
3345 		ret = -EINVAL;
3346 		goto err_unreference_tbt_pll;
3347 	}
3348 	intel_reference_shared_dpll(state, crtc,
3349 				    port_dpll->pll, &port_dpll->hw_state);
3350 
3351 	icl_update_active_dpll(state, crtc, encoder);
3352 
3353 	return 0;
3354 
3355 err_unreference_tbt_pll:
3356 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3357 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3358 
3359 	return ret;
3360 }
3361 
3362 static int icl_compute_dplls(struct intel_atomic_state *state,
3363 			     struct intel_crtc *crtc,
3364 			     struct intel_encoder *encoder)
3365 {
3366 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3367 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3368 
3369 	if (intel_phy_is_combo(dev_priv, phy))
3370 		return icl_compute_combo_phy_dpll(state, crtc);
3371 	else if (intel_phy_is_tc(dev_priv, phy))
3372 		return icl_compute_tc_phy_dplls(state, crtc);
3373 
3374 	MISSING_CASE(phy);
3375 
3376 	return 0;
3377 }
3378 
3379 static int icl_get_dplls(struct intel_atomic_state *state,
3380 			 struct intel_crtc *crtc,
3381 			 struct intel_encoder *encoder)
3382 {
3383 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3384 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3385 
3386 	if (intel_phy_is_combo(dev_priv, phy))
3387 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3388 	else if (intel_phy_is_tc(dev_priv, phy))
3389 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3390 
3391 	MISSING_CASE(phy);
3392 
3393 	return -EINVAL;
3394 }
3395 
3396 static void icl_put_dplls(struct intel_atomic_state *state,
3397 			  struct intel_crtc *crtc)
3398 {
3399 	const struct intel_crtc_state *old_crtc_state =
3400 		intel_atomic_get_old_crtc_state(state, crtc);
3401 	struct intel_crtc_state *new_crtc_state =
3402 		intel_atomic_get_new_crtc_state(state, crtc);
3403 	enum icl_port_dpll_id id;
3404 
3405 	new_crtc_state->shared_dpll = NULL;
3406 
3407 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3408 		const struct icl_port_dpll *old_port_dpll =
3409 			&old_crtc_state->icl_port_dplls[id];
3410 		struct icl_port_dpll *new_port_dpll =
3411 			&new_crtc_state->icl_port_dplls[id];
3412 
3413 		new_port_dpll->pll = NULL;
3414 
3415 		if (!old_port_dpll->pll)
3416 			continue;
3417 
3418 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3419 	}
3420 }
3421 
/*
 * Read out the HW state of an MG (ICL Type-C PHY) PLL into @hw_state.
 *
 * Each register value is masked down to the fields the driver programs, so
 * the readout can be compared against a state computed by
 * icl_calc_mg_pll_state().
 *
 * Returns true if the PLL is enabled and its state was read out, false
 * otherwise (display powered down or PLL disabled).
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* Registers are only accessible with the display core powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * With a 38.4 MHz refclk only a subset of the bias register fields
	 * is programmed/compared. The same masks are produced by
	 * icl_calc_mg_pll_state() and reused for RMW in icl_mg_pll_write().
	 */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3487 
/*
 * Read out the HW state of a DKL (display ver 12+ Type-C PHY) PLL into
 * @hw_state, masking each register down to the fields the driver programs
 * (matching the RMW masks used by dkl_pll_write()).
 *
 * Returns true if the PLL is enabled and its state was read out, false
 * otherwise (display powered down or PLL disabled).
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with the display core powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* The AFC startup field only counts when the VBT overrides it. */
	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3561 
/*
 * Read out the HW state of a combo/TBT PLL into @hw_state, using the
 * platform specific CFGCR register instances (mirroring the register
 * selection in icl_dpll_write()).
 *
 * Returns true if the PLL is enabled and its state was read out, false
 * otherwise (display powered down or PLL disabled).
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with the display core powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only tracked when the VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR instance at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3620 
3621 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3622 				   struct intel_shared_dpll *pll,
3623 				   struct intel_dpll_hw_state *hw_state)
3624 {
3625 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3626 
3627 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3628 }
3629 
3630 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3631 				 struct intel_shared_dpll *pll,
3632 				 struct intel_dpll_hw_state *hw_state)
3633 {
3634 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3635 }
3636 
/*
 * Program a combo/TBT PLL's configuration from pll->state.hw_state,
 * selecting the platform specific CFGCR register instances (mirroring
 * the selection in icl_pll_get_hw_state()).
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	/* DIV0 only exists on platforms selecting it below (TGL+). */
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR instance at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC startup override without a DIV0 register is a driver bug. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
			     hw_state->div0);
	/* Posting read to flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3677 
/*
 * Program an MG PHY PLL's registers from pll->state.hw_state. Must be
 * called with the PLL powered up but still disabled (see mg_pll_enable()).
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Bias registers use the refclk dependent masks from the readout/calc. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3728 
/*
 * Program a DKL PHY PLL's registers from pll->state.hw_state. Must be
 * called with the PLL powered up but still disabled (see mg_pll_enable()).
 * The HIP index write must precede all other register accesses.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* Only touch the AFC startup field when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
		     hw_state->mg_pll_div0);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3796 
3797 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3798 				 struct intel_shared_dpll *pll,
3799 				 i915_reg_t enable_reg)
3800 {
3801 	u32 val;
3802 
3803 	val = intel_de_read(dev_priv, enable_reg);
3804 	val |= PLL_POWER_ENABLE;
3805 	intel_de_write(dev_priv, enable_reg, val);
3806 
3807 	/*
3808 	 * The spec says we need to "wait" but it also says it should be
3809 	 * immediate.
3810 	 */
3811 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3812 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3813 			pll->info->id);
3814 }
3815 
3816 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3817 			   struct intel_shared_dpll *pll,
3818 			   i915_reg_t enable_reg)
3819 {
3820 	u32 val;
3821 
3822 	val = intel_de_read(dev_priv, enable_reg);
3823 	val |= PLL_ENABLE;
3824 	intel_de_write(dev_priv, enable_reg, val);
3825 
3826 	/* Timeout is actually 600us. */
3827 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3828 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3829 }
3830 
/* Wa_16011069516 - see the comment below; must run right after DPLL0 enable. */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only ADL-P display steps A0..B0 are affected, and only via DPLL0. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	/* Intentional double read - do not "optimize" away (see above). */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3855 
/*
 * Enable a combo PHY PLL: power it up, program its configuration, set the
 * enable bit and wait for lock. The step order follows the HW enable
 * sequence and must not change.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Program the PLL configuration while powered but still disabled. */
	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* Needs an enabled DPLL0, hence applied here (see the WA comment). */
	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3889 
/*
 * Enable the Thunderbolt PLL: power up, program dividers, set the enable
 * bit. Uses the fixed TBT_PLL_ENABLE register rather than a per-port one.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3907 
/*
 * Enable a Type-C PHY PLL: power up, program the PHY-specific dividers
 * (Dekel on display version 12+, MG otherwise), then set the enable bit.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* TGL+ uses the Dekel PHY register layout, ICL the MG PHY one. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3930 
/*
 * Common ICL+ PLL disable sequence: clear PLL_ENABLE and wait for the lock
 * bit to drop, then clear PLL_POWER_ENABLE and wait for the power state bit
 * to drop. The ordering of the two steps matters.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3967 
/* Disable a combo PHY PLL and undo any power reference taken on enable. */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	/* Drop the DC_OFF reference combo_pll_enable() took for DPLL4. */
	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
					pll->wakeref);
}
3980 
/* Disable the Thunderbolt PLL via its fixed enable register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3986 
/* Disable a Type-C (MG/DKL) PHY PLL via its per-port enable register. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, intel_tc_pll_enable_reg(dev_priv, pll));
}
3994 
/* Refresh the non-SSC reference clock from the current cdclk readout. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
4000 
4001 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4002 			      const struct intel_dpll_hw_state *hw_state)
4003 {
4004 	drm_dbg_kms(&dev_priv->drm,
4005 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
4006 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4007 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4008 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4009 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4010 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4011 		    hw_state->cfgcr0, hw_state->cfgcr1,
4012 		    hw_state->div0,
4013 		    hw_state->mg_refclkin_ctl,
4014 		    hw_state->mg_clktop2_coreclkctl1,
4015 		    hw_state->mg_clktop2_hsclkctl,
4016 		    hw_state->mg_pll_div0,
4017 		    hw_state->mg_pll_div1,
4018 		    hw_state->mg_pll_lf,
4019 		    hw_state->mg_pll_frac_lock,
4020 		    hw_state->mg_pll_ssc,
4021 		    hw_state->mg_pll_bias,
4022 		    hw_state->mg_pll_tdc_coldst_bias);
4023 }
4024 
/* Hooks for the ICL+ combo PHY PLLs (per-PLL enable register). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Hooks for the Thunderbolt PLL (fixed TBT_PLL_ENABLE register). */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Hooks for the ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4045 
/* ICL PLL pool: two combo DPLLs, the TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4066 
/* JSL/EHL PLL pool: combo PHY PLLs only, no Type-C ports. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4082 
/*
 * Hooks for the TGL+ Dekel PHY (Type-C) PLLs; these share the
 * enable/disable paths with the MG PLLs but have their own readout.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4089 
/* TGL PLL pool: two combo DPLLs, the TBT PLL and six Dekel Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4112 
/* RKL PLL pool: combo PHY PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4128 
/* DG1 PLL pool: four combo PHY PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4145 
/* ADL-S PLL pool: four combo PHY PLLs (reusing ICL and DG1 ids). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4162 
/* ADL-P PLL pool: two combo DPLLs, the TBT PLL and four Dekel Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4183 
4184 /**
4185  * intel_shared_dpll_init - Initialize shared DPLLs
4186  * @dev_priv: i915 device
4187  *
4188  * Initialize shared DPLLs for @dev_priv.
4189  */
4190 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4191 {
4192 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4193 	const struct dpll_info *dpll_info;
4194 	int i;
4195 
4196 	if (IS_DG2(dev_priv))
4197 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4198 		dpll_mgr = NULL;
4199 	else if (IS_ALDERLAKE_P(dev_priv))
4200 		dpll_mgr = &adlp_pll_mgr;
4201 	else if (IS_ALDERLAKE_S(dev_priv))
4202 		dpll_mgr = &adls_pll_mgr;
4203 	else if (IS_DG1(dev_priv))
4204 		dpll_mgr = &dg1_pll_mgr;
4205 	else if (IS_ROCKETLAKE(dev_priv))
4206 		dpll_mgr = &rkl_pll_mgr;
4207 	else if (DISPLAY_VER(dev_priv) >= 12)
4208 		dpll_mgr = &tgl_pll_mgr;
4209 	else if (IS_JSL_EHL(dev_priv))
4210 		dpll_mgr = &ehl_pll_mgr;
4211 	else if (DISPLAY_VER(dev_priv) >= 11)
4212 		dpll_mgr = &icl_pll_mgr;
4213 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4214 		dpll_mgr = &bxt_pll_mgr;
4215 	else if (DISPLAY_VER(dev_priv) == 9)
4216 		dpll_mgr = &skl_pll_mgr;
4217 	else if (HAS_DDI(dev_priv))
4218 		dpll_mgr = &hsw_pll_mgr;
4219 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4220 		dpll_mgr = &pch_pll_mgr;
4221 
4222 	if (!dpll_mgr) {
4223 		dev_priv->display.dpll.num_shared_dpll = 0;
4224 		return;
4225 	}
4226 
4227 	dpll_info = dpll_mgr->dpll_info;
4228 
4229 	for (i = 0; dpll_info[i].name; i++) {
4230 		if (drm_WARN_ON(&dev_priv->drm,
4231 				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4232 			break;
4233 
4234 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4235 		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4236 	}
4237 
4238 	dev_priv->display.dpll.mgr = dpll_mgr;
4239 	dev_priv->display.dpll.num_shared_dpll = i;
4240 	mutex_init(&dev_priv->display.dpll.lock);
4241 }
4242 
4243 /**
4244  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4245  * @state: atomic state
4246  * @crtc: CRTC to compute DPLLs for
4247  * @encoder: encoder
4248  *
4249  * This function computes the DPLL state for the given CRTC and encoder.
4250  *
4251  * The new configuration in the atomic commit @state is made effective by
4252  * calling intel_shared_dpll_swap_state().
4253  *
4254  * Returns:
4255  * 0 on success, negative error code on falure.
4256  */
4257 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4258 			       struct intel_crtc *crtc,
4259 			       struct intel_encoder *encoder)
4260 {
4261 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4262 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4263 
4264 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4265 		return -EINVAL;
4266 
4267 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4268 }
4269 
4270 /**
4271  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4272  * @state: atomic state
4273  * @crtc: CRTC to reserve DPLLs for
4274  * @encoder: encoder
4275  *
4276  * This function reserves all required DPLLs for the given CRTC and encoder
4277  * combination in the current atomic commit @state and the new @crtc atomic
4278  * state.
4279  *
4280  * The new configuration in the atomic commit @state is made effective by
4281  * calling intel_shared_dpll_swap_state().
4282  *
4283  * The reserved DPLLs should be released by calling
4284  * intel_release_shared_dplls().
4285  *
4286  * Returns:
4287  * 0 if all required DPLLs were successfully reserved,
4288  * negative error code otherwise.
4289  */
4290 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4291 			       struct intel_crtc *crtc,
4292 			       struct intel_encoder *encoder)
4293 {
4294 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4295 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4296 
4297 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4298 		return -EINVAL;
4299 
4300 	return dpll_mgr->get_dplls(state, crtc, encoder);
4301 }
4302 
4303 /**
4304  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4305  * @state: atomic state
4306  * @crtc: crtc from which the DPLLs are to be released
4307  *
4308  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4309  * from the current atomic commit @state and the old @crtc atomic state.
4310  *
4311  * The new configuration in the atomic commit @state is made effective by
4312  * calling intel_shared_dpll_swap_state().
4313  */
4314 void intel_release_shared_dplls(struct intel_atomic_state *state,
4315 				struct intel_crtc *crtc)
4316 {
4317 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4318 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4319 
4320 	/*
4321 	 * FIXME: this function is called for every platform having a
4322 	 * compute_clock hook, even though the platform doesn't yet support
4323 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4324 	 * called on those.
4325 	 */
4326 	if (!dpll_mgr)
4327 		return;
4328 
4329 	dpll_mgr->put_dplls(state, crtc);
4330 }
4331 
4332 /**
4333  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4334  * @state: atomic state
4335  * @crtc: the CRTC for which to update the active DPLL
4336  * @encoder: encoder determining the type of port DPLL
4337  *
4338  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4339  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4340  * DPLL selected will be based on the current mode of the encoder's port.
4341  */
4342 void intel_update_active_dpll(struct intel_atomic_state *state,
4343 			      struct intel_crtc *crtc,
4344 			      struct intel_encoder *encoder)
4345 {
4346 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4347 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4348 
4349 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4350 		return;
4351 
4352 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4353 }
4354 
4355 /**
4356  * intel_dpll_get_freq - calculate the DPLL's output frequency
4357  * @i915: i915 device
4358  * @pll: DPLL for which to calculate the output frequency
4359  * @pll_state: DPLL state from which to calculate the output frequency
4360  *
4361  * Return the output frequency corresponding to @pll's passed in @pll_state.
4362  */
4363 int intel_dpll_get_freq(struct drm_i915_private *i915,
4364 			const struct intel_shared_dpll *pll,
4365 			const struct intel_dpll_hw_state *pll_state)
4366 {
4367 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4368 		return 0;
4369 
4370 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4371 }
4372 
4373 /**
4374  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4375  * @i915: i915 device
4376  * @pll: DPLL for which to calculate the output frequency
4377  * @hw_state: DPLL's hardware state
4378  *
4379  * Read out @pll's hardware state into @hw_state.
4380  */
4381 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4382 			     struct intel_shared_dpll *pll,
4383 			     struct intel_dpll_hw_state *hw_state)
4384 {
4385 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4386 }
4387 
/*
 * Read out the hw state of @pll and reconstruct the sw tracking: whether
 * the PLL is on and which active pipes reference it.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * An enabled DPLL4 on JSL/EHL requires DC states to stay off; mirror
	 * the DC_OFF power reference combo_pll_enable() would have taken.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Collect every active pipe currently driven by this PLL. */
		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4415 
4416 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4417 {
4418 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4419 		i915->display.dpll.mgr->update_ref_clks(i915);
4420 }
4421 
4422 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4423 {
4424 	int i;
4425 
4426 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4427 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4428 }
4429 
4430 static void sanitize_dpll_state(struct drm_i915_private *i915,
4431 				struct intel_shared_dpll *pll)
4432 {
4433 	if (!pll->on)
4434 		return;
4435 
4436 	adlp_cmtg_clock_gating_wa(i915, pll);
4437 
4438 	if (pll->active_mask)
4439 		return;
4440 
4441 	drm_dbg_kms(&i915->drm,
4442 		    "%s enabled but not in use, disabling\n",
4443 		    pll->info->name);
4444 
4445 	pll->info->funcs->disable(i915, pll);
4446 	pll->on = false;
4447 }
4448 
4449 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4450 {
4451 	int i;
4452 
4453 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4454 		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4455 }
4456 
4457 /**
4458  * intel_dpll_dump_hw_state - write hw_state to dmesg
4459  * @dev_priv: i915 drm device
4460  * @hw_state: hw state to be written to the log
4461  *
4462  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4463  */
4464 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4465 			      const struct intel_dpll_hw_state *hw_state)
4466 {
4467 	if (dev_priv->display.dpll.mgr) {
4468 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4469 	} else {
4470 		/* fallback for platforms that don't use the shared dpll
4471 		 * infrastructure
4472 		 */
4473 		drm_dbg_kms(&dev_priv->drm,
4474 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4475 			    "fp0: 0x%x, fp1: 0x%x\n",
4476 			    hw_state->dpll,
4477 			    hw_state->dpll_md,
4478 			    hw_state->fp0,
4479 			    hw_state->fp1);
4480 	}
4481 }
4482 
/*
 * Cross-check @pll's sw tracking against its hw state. With a @crtc given,
 * also verify that @crtc's membership in the PLL's active/enabled pipe
 * masks matches @new_crtc_state; with @crtc NULL, only verify the PLL's
 * global bookkeeping.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active consistency checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		/* Every active user must also hold a reference. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active pipe must be in the active mask, an inactive one not. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* The sw copy of the hw state must match the actual hw registers. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4537 
4538 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4539 				    struct intel_crtc_state *old_crtc_state,
4540 				    struct intel_crtc_state *new_crtc_state)
4541 {
4542 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4543 
4544 	if (new_crtc_state->shared_dpll)
4545 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4546 					 crtc, new_crtc_state);
4547 
4548 	if (old_crtc_state->shared_dpll &&
4549 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4550 		u8 pipe_mask = BIT(crtc->pipe);
4551 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4552 
4553 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4554 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4555 				pipe_name(crtc->pipe), pll->active_mask);
4556 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
4557 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4558 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4559 	}
4560 }
4561 
4562 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4563 {
4564 	int i;
4565 
4566 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4567 		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4568 					 NULL, NULL);
4569 }
4570