1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "i915_reg.h"
27 #include "intel_de.h"
28 #include "intel_display_types.h"
29 #include "intel_dkl_phy.h"
30 #include "intel_dkl_phy_regs.h"
31 #include "intel_dpio_phy.h"
32 #include "intel_dpll.h"
33 #include "intel_dpll_mgr.h"
34 #include "intel_mg_phy_regs.h"
35 #include "intel_pch_refclk.h"
36 #include "intel_tc.h"
37 
38 /**
39  * DOC: Display PLLs
40  *
41  * Display PLLs used for driving outputs vary by platform. While some have
42  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
43  * from a pool. In the latter scenario, it is possible that multiple pipes
44  * share a PLL if their configurations match.
45  *
46  * This file provides an abstraction over display PLLs. The function
47  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
48  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
50  * for a given CRTC and encoder configuration by calling
51  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
52  * with intel_release_shared_dplls().
53  * Changes to the users are first staged in the atomic state, and then made
54  * effective by calling intel_shared_dpll_swap_state() during the atomic
55  * commit phase.
56  */
57 
/*
 * Platform specific hooks for managing DPLLs. Each platform's dpll_info
 * table entries reference one of these vtables; all hooks operate on a
 * single PLL instance.
 */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns whether the PLL is enabled.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
92 
/*
 * Per-platform DPLL manager: the table of available DPLLs plus the hooks
 * used to compute, reserve and release them during an atomic modeset.
 */
struct intel_dpll_mgr {
	/* Table of the platform's DPLLs, terminated by an empty entry. */
	const struct dpll_info *dpll_info;

	/* Compute the DPLL state needed by @crtc/@encoder. */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* Reserve the DPLL(s) for @crtc/@encoder in the atomic state. */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* Release the DPLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Hook for updating which of the reserved DPLLs is active. */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Refresh the cached reference clock frequencies. */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Dump @hw_state to the kernel debug log. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
111 
112 static void
113 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
114 				  struct intel_shared_dpll_state *shared_dpll)
115 {
116 	enum intel_dpll_id i;
117 
118 	/* Copy shared dpll state */
119 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
120 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
121 
122 		shared_dpll[i] = pll->state;
123 	}
124 }
125 
126 static struct intel_shared_dpll_state *
127 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
128 {
129 	struct intel_atomic_state *state = to_intel_atomic_state(s);
130 
131 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
132 
133 	if (!state->dpll_set) {
134 		state->dpll_set = true;
135 
136 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
137 						  state->shared_dpll);
138 	}
139 
140 	return state->shared_dpll;
141 }
142 
143 /**
144  * intel_get_shared_dpll_by_id - get a DPLL given its id
145  * @dev_priv: i915 device instance
146  * @id: pll id
147  *
148  * Returns:
149  * A pointer to the DPLL with @id
150  */
151 struct intel_shared_dpll *
152 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
153 			    enum intel_dpll_id id)
154 {
155 	return &dev_priv->display.dpll.shared_dplls[id];
156 }
157 
/* For ILK+ */
/*
 * Assert that @pll's hardware enable state matches @state (true = enabled),
 * emitting a state warning if it doesn't.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Read the live hw state rather than trusting the cached sw state. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
176 
177 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
178 {
179 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
180 }
181 
182 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
183 {
184 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
185 }
186 
187 static i915_reg_t
188 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
189 			   struct intel_shared_dpll *pll)
190 {
191 	if (IS_DG1(i915))
192 		return DG1_DPLL_ENABLE(pll->info->id);
193 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
194 		return MG_PLL_ENABLE(0);
195 
196 	return ICL_DPLL_ENABLE(pll->info->id);
197 }
198 
199 static i915_reg_t
200 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
201 			struct intel_shared_dpll *pll)
202 {
203 	const enum intel_dpll_id id = pll->info->id;
204 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
205 
206 	if (IS_ALDERLAKE_P(i915))
207 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
208 
209 	return MG_PLL_ENABLE(tc_port);
210 }
211 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc. The PLL hardware is only touched
 * for the first pipe that starts using it; additional pipes just join the
 * active mask.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->display.dpll.lock);
	old_mask = pll->active_mask;

	/* The pipe must have reserved the PLL and must not be active yet. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/* Another pipe already enabled the hardware; just sanity-check it. */
	if (old_mask) {
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First user: actually enable the PLL hardware. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->display.dpll.lock);
}
257 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc. The PLL hardware is only turned
 * off once the last pipe using it has been removed from the active mask.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->display.dpll.lock);
	/* The pipe must currently be an active user of the PLL. */
	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	/* Keep the hardware running while other pipes still use the PLL. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->display.dpll.lock);
}
303 
/*
 * Find a shared DPLL out of @dpll_mask usable by @crtc: either one whose
 * already-reserved state matches @pll_state (so the PLL can be shared),
 * or, failing that, a PLL with no users yet. Returns NULL if neither
 * exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->display.dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
352 
/*
 * Stage a reservation of @pll for @crtc in the atomic state. The first
 * user of a PLL also records @pll_state as the state to be programmed.
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* Only the first user records the hw state; later users must match. */
	if (shared_dpll[id].pipe_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	/* A pipe must not reserve the same PLL twice. */
	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0);

	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
375 
/* Drop @crtc's staged reservation of @pll from the atomic state. */
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
					  const struct intel_crtc *crtc,
					  const struct intel_shared_dpll *pll)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* The pipe must actually hold a reservation on this PLL. */
	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0);

	shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}
393 
394 static void intel_put_dpll(struct intel_atomic_state *state,
395 			   struct intel_crtc *crtc)
396 {
397 	const struct intel_crtc_state *old_crtc_state =
398 		intel_atomic_get_old_crtc_state(state, crtc);
399 	struct intel_crtc_state *new_crtc_state =
400 		intel_atomic_get_new_crtc_state(state, crtc);
401 
402 	new_crtc_state->shared_dpll = NULL;
403 
404 	if (!old_crtc_state->shared_dpll)
405 		return;
406 
407 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
408 }
409 
410 /**
411  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
412  * @state: atomic state
413  *
414  * This is the dpll version of drm_atomic_helper_swap_state() since the
415  * helper does not handle driver-specific global state.
416  *
417  * For consistency with atomic helpers this function does a complete swap,
418  * i.e. it also puts the current state into @state, even though there is no
419  * need for that at this moment.
420  */
421 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
422 {
423 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
424 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
425 	enum intel_dpll_id i;
426 
427 	if (!state->dpll_set)
428 		return;
429 
430 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
431 		struct intel_shared_dpll *pll =
432 			&dev_priv->display.dpll.shared_dplls[i];
433 
434 		swap(pll->state, shared_dpll[i]);
435 	}
436 }
437 
/*
 * Read out the PCH DPLL's programmed register values into @hw_state.
 * Returns whether the PLL's VCO is currently enabled; false also when the
 * display power is off and the registers can't be read.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only accessible while the power domain is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
460 
/*
 * Warn if the PCH reference clock isn't enabled; the PCH DPLLs depend on
 * it being up before they are enabled.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* Only IBX/CPT PCHs have this refclk arrangement. */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	/* Any of the three source selects counts as the refclk being active. */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
473 
/*
 * Program and enable a PCH DPLL from its cached sw state. The write/delay
 * sequence follows the hardware requirements and must not be reordered.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Dividers first, then the DPLL control register itself. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
500 
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* Let the PLL wind down before anything else touches it. */
	udelay(200);
}
510 
/*
 * Nothing to compute for PCH DPLLs here; crtc_state->dpll_hw_state is
 * filled in elsewhere and consumed directly by ibx_get_dpll().
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
517 
/*
 * Reserve a PCH DPLL for @crtc. On Ironlake the mapping is fixed per
 * pipe; otherwise any of the two PCH PLLs may be picked from the pool.
 * Returns 0 on success or -EINVAL if no PLL could be found.
 */
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->display.dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return -EINVAL;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
555 
/* Log the PCH DPLL hw state fields for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
567 
/* Hooks for the two IBX/CPT PCH DPLLs. No get_freq hook is provided. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* PCH DPLL table, terminated by the empty sentinel entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for IBX/CPT PCH platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
587 
/* Program and enable a HSW/BDW WRPLL from its cached sw state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* Give the PLL time to settle after the write. */
	udelay(20);
}
597 
/* Program and enable the HSW/BDW SPLL from its cached sw state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* Give the PLL time to settle after the write. */
	udelay(20);
}
605 
/* Disable a WRPLL and, if it was the last SSC user, restore PCH refclk. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
623 
/* Disable the SPLL and, if it was the last SSC user, restore PCH refclk. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
641 
/*
 * Read out a WRPLL's control register into @hw_state. Returns whether the
 * PLL is enabled; false also when the display power is off.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only accessible while the power domain is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
662 
/*
 * Read out the SPLL's control register into @hw_state. Returns whether
 * the PLL is enabled; false also when the display power is off.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only accessible while the power domain is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
682 
/* LC PLL reference: 2700 MHz; LC_FREQ_2K is scaled to the freq2k units
 * used in hsw_wrpll_update_rnp(). */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post-divider (P) search range: even values from 2 to 64. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL divider triple: P, N2 (= 2*N) and R2 (= 2*R). */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
699 
/*
 * Return the PPM error budget allowed for the given pixel clock (in Hz).
 * Clocks not listed in the table get the default 1000 ppm budget.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned int budget;
	} budgets[] = {
		{ 25175000, 0 }, { 25200000, 0 }, { 27000000, 0 },
		{ 27027000, 0 }, { 37762500, 0 }, { 37800000, 0 },
		{ 40500000, 0 }, { 40541000, 0 }, { 54000000, 0 },
		{ 54054000, 0 }, { 59341000, 0 }, { 59400000, 0 },
		{ 72000000, 0 }, { 74176000, 0 }, { 74250000, 0 },
		{ 81000000, 0 }, { 81081000, 0 }, { 89012000, 0 },
		{ 89100000, 0 }, { 108000000, 0 }, { 108108000, 0 },
		{ 111264000, 0 }, { 111375000, 0 }, { 148352000, 0 },
		{ 148500000, 0 }, { 162000000, 0 }, { 162162000, 0 },
		{ 222525000, 0 }, { 222750000, 0 }, { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
763 
/*
 * Consider (r2, n2, p) as a candidate WRPLL divider triple for the target
 * frequency @freq2k and update *best if it beats the current best per the
 * budget rules described below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * Cross-multiplied comparison terms, all scaled by p*r2 to avoid
	 * division: a/b are the budget thresholds for the candidate/best,
	 * c/d the corresponding scaled frequency errors.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
823 
/*
 * Compute the WRPLL divider triple (R2, N2, P) for the given clock (in Hz)
 * by exhaustively searching the valid (r2, n2, p) space and keeping the
 * best candidate per hsw_wrpll_update_rnp().
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* Target frequency in units of 100 Hz, matching LC_FREQ_2K scaling. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
888 
/*
 * Calculate a WRPLL's output frequency (in kHz) from its programmed
 * reference select and divider fields. Returns 0 for an unknown
 * reference selection.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		/* LC PLL is a fixed 2700 MHz (value in kHz). */
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
928 
/*
 * Compute the WRPLL register value for @crtc's port clock and update
 * port_clock with the frequency the PLL will actually produce.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	/* port_clock is in kHz; the calculation wants Hz. */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	/* Readback: store the actual achievable frequency (pll unused). */
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
950 
951 static struct intel_shared_dpll *
952 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
953 		       struct intel_crtc *crtc)
954 {
955 	struct intel_crtc_state *crtc_state =
956 		intel_atomic_get_new_crtc_state(state, crtc);
957 
958 	return intel_find_shared_dpll(state, crtc,
959 				      &crtc_state->dpll_hw_state,
960 				      BIT(DPLL_ID_WRPLL2) |
961 				      BIT(DPLL_ID_WRPLL1));
962 }
963 
964 static int
965 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
966 {
967 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
968 	int clock = crtc_state->port_clock;
969 
970 	switch (clock / 2) {
971 	case 81000:
972 	case 135000:
973 	case 270000:
974 		return 0;
975 	default:
976 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
977 			    clock);
978 		return -EINVAL;
979 	}
980 }
981 
982 static struct intel_shared_dpll *
983 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
984 {
985 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
986 	struct intel_shared_dpll *pll;
987 	enum intel_dpll_id pll_id;
988 	int clock = crtc_state->port_clock;
989 
990 	switch (clock / 2) {
991 	case 81000:
992 		pll_id = DPLL_ID_LCPLL_810;
993 		break;
994 	case 135000:
995 		pll_id = DPLL_ID_LCPLL_1350;
996 		break;
997 	case 270000:
998 		pll_id = DPLL_ID_LCPLL_2700;
999 		break;
1000 	default:
1001 		MISSING_CASE(clock / 2);
1002 		return NULL;
1003 	}
1004 
1005 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1006 
1007 	if (!pll)
1008 		return NULL;
1009 
1010 	return pll;
1011 }
1012 
/*
 * Return the port clock (in kHz) driven by the given fixed LCPLL; the
 * link clock is determined purely by the PLL id. Returns 0 (and warns)
 * for an unexpected id.
 */
static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch (pll->info->id) {
	case DPLL_ID_LCPLL_810:
		link_clock = 81000;
		break;
	case DPLL_ID_LCPLL_1350:
		link_clock = 135000;
		break;
	case DPLL_ID_LCPLL_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
		break;
	}

	/* Port clock is twice the link clock. */
	return link_clock * 2;
}
1036 
1037 static int
1038 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1039 			  struct intel_crtc *crtc)
1040 {
1041 	struct intel_crtc_state *crtc_state =
1042 		intel_atomic_get_new_crtc_state(state, crtc);
1043 
1044 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1045 		return -EINVAL;
1046 
1047 	crtc_state->dpll_hw_state.spll =
1048 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1049 
1050 	return 0;
1051 }
1052 
1053 static struct intel_shared_dpll *
1054 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1055 		      struct intel_crtc *crtc)
1056 {
1057 	struct intel_crtc_state *crtc_state =
1058 		intel_atomic_get_new_crtc_state(state, crtc);
1059 
1060 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1061 				      BIT(DPLL_ID_SPLL));
1062 }
1063 
1064 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1065 				 const struct intel_shared_dpll *pll,
1066 				 const struct intel_dpll_hw_state *pll_state)
1067 {
1068 	int link_clock = 0;
1069 
1070 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1071 	case SPLL_FREQ_810MHz:
1072 		link_clock = 81000;
1073 		break;
1074 	case SPLL_FREQ_1350MHz:
1075 		link_clock = 135000;
1076 		break;
1077 	case SPLL_FREQ_2700MHz:
1078 		link_clock = 270000;
1079 		break;
1080 	default:
1081 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1082 		break;
1083 	}
1084 
1085 	return link_clock * 2;
1086 }
1087 
1088 static int hsw_compute_dpll(struct intel_atomic_state *state,
1089 			    struct intel_crtc *crtc,
1090 			    struct intel_encoder *encoder)
1091 {
1092 	struct intel_crtc_state *crtc_state =
1093 		intel_atomic_get_new_crtc_state(state, crtc);
1094 
1095 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1096 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1097 	else if (intel_crtc_has_dp_encoder(crtc_state))
1098 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1099 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1100 		return hsw_ddi_spll_compute_dpll(state, crtc);
1101 	else
1102 		return -EINVAL;
1103 }
1104 
1105 static int hsw_get_dpll(struct intel_atomic_state *state,
1106 			struct intel_crtc *crtc,
1107 			struct intel_encoder *encoder)
1108 {
1109 	struct intel_crtc_state *crtc_state =
1110 		intel_atomic_get_new_crtc_state(state, crtc);
1111 	struct intel_shared_dpll *pll = NULL;
1112 
1113 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1114 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1115 	else if (intel_crtc_has_dp_encoder(crtc_state))
1116 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1117 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1118 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1119 
1120 	if (!pll)
1121 		return -EINVAL;
1122 
1123 	intel_reference_shared_dpll(state, crtc,
1124 				    pll, &crtc_state->dpll_hw_state);
1125 
1126 	crtc_state->shared_dpll = pll;
1127 
1128 	return 0;
1129 }
1130 
1131 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1132 {
1133 	i915->display.dpll.ref_clks.ssc = 135000;
1134 	/* Non-SSC is only used on non-ULT HSW. */
1135 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1136 		i915->display.dpll.ref_clks.nssc = 24000;
1137 	else
1138 		i915->display.dpll.ref_clks.nssc = 135000;
1139 }
1140 
/* Log the HSW PLL hw state (WRPLL and SPLL control values) for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1147 
/* Hooks for the two HSW WRPLLs (used for HDMI outputs). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1154 
/* Hooks for the HSW SPLL (used for the analog output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1161 
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	/* LCPLLs are fixed, always-on PLLs; nothing to enable. */
}
1166 
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	/* LCPLLs are fixed, always-on PLLs; nothing to disable. */
}
1171 
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	/* No per-PLL state to read out; an LCPLL is always "enabled". */
	return true;
}
1178 
/* Hooks for the fixed HSW LCPLLs backing the three DP link rates. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1185 
/* All shared PLLs on HSW/BDW; the LCPLLs are fixed and always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1195 
/* Shared DPLL manager for HSW/BDW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1204 
/* Control and configuration registers for one SKL DPLL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1208 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1235 
1236 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1237 				    struct intel_shared_dpll *pll)
1238 {
1239 	const enum intel_dpll_id id = pll->info->id;
1240 	u32 val;
1241 
1242 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1243 
1244 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1245 		 DPLL_CTRL1_SSC(id) |
1246 		 DPLL_CTRL1_LINK_RATE_MASK(id));
1247 	val |= pll->state.hw_state.ctrl1 << (id * 6);
1248 
1249 	intel_de_write(dev_priv, DPLL_CTRL1, val);
1250 	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1251 }
1252 
/* Program and enable one of SKL DPLL 1-3, then wait for it to lock. */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* Program this PLL's field of the shared DPLL_CTRL1 first. */
	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* Give the PLL up to 5 ms to report lock. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1273 
/*
 * DPLL 0 is always running (it drives CDCLK, see the readout below), so
 * "enabling" it only means updating its DPLL_CTRL1 field.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1279 
/* Disable one of SKL DPLL 1-3 by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1291 
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	/* DPLL 0 is never disabled since it drives CDCLK. */
}
1296 
/*
 * Read back the state of DPLL 1-3 from hardware into @hw_state.
 * Returns false if the display core power domain is down or the PLL is
 * disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* Don't power anything up just for readout. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Each PLL owns a 6-bit field in the shared DPLL_CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1333 
/*
 * Read back the DPLL 0 state (only its DPLL_CTRL1 field; DPLL 0 has no
 * cfgcr registers since it doesn't support HDMI mode).
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	/* Don't power anything up just for readout. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1366 
/* Best WRPLL divider candidate found so far by skl_ddi_calculate_wrpll(). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1373 
1374 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1375 #define SKL_DCO_MAX_PDEVIATION	100
1376 #define SKL_DCO_MAX_NDEVIATION	600
1377 
1378 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1379 				  u64 central_freq,
1380 				  u64 dco_freq,
1381 				  unsigned int divider)
1382 {
1383 	u64 deviation;
1384 
1385 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1386 			      central_freq);
1387 
1388 	/* positive deviation */
1389 	if (dco_freq >= central_freq) {
1390 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1391 		    deviation < ctx->min_deviation) {
1392 			ctx->min_deviation = deviation;
1393 			ctx->central_freq = central_freq;
1394 			ctx->dco_freq = dco_freq;
1395 			ctx->p = divider;
1396 		}
1397 	/* negative deviation */
1398 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1399 		   deviation < ctx->min_deviation) {
1400 		ctx->min_deviation = deviation;
1401 		ctx->central_freq = central_freq;
1402 		ctx->dco_freq = dco_freq;
1403 		ctx->p = divider;
1404 	}
1405 }
1406 
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	/*
	 * Split the overall divider p into the P0 * P1 * P2 stages.
	 * Unsupported values of p leave the outputs untouched.
	 */
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* Small halves map directly onto P2. */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* Odd dividers: 3, 5, 7, 9, 15, 21, 35. */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1455 
/* WRPLL register field values produced by skl_wrpll_params_populate(). */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1465 
/*
 * Encode the chosen central frequency and P0/P1/P2 dividers into the
 * register field values of @params, and compute the DCO integer/fraction.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/*
	 * NOTE(review): no default case here; callers only pass one of the
	 * three known central frequencies.
	 */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
		/* last case: falls off the end of the switch */
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* Q divider is bypassed (mode 0) when the ratio is 1. */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* Fraction is in 1/2^15 units (the 0x8000 scaling below). */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1533 
1534 static int
1535 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1536 			int ref_clock,
1537 			struct skl_wrpll_params *wrpll_params)
1538 {
1539 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1540 						 9000000000ULL,
1541 						 9600000000ULL };
1542 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1543 					    24, 28, 30, 32, 36, 40, 42, 44,
1544 					    48, 52, 54, 56, 60, 64, 66, 68,
1545 					    70, 72, 76, 78, 80, 84, 88, 90,
1546 					    92, 96, 98 };
1547 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1548 	static const struct {
1549 		const u8 *list;
1550 		int n_dividers;
1551 	} dividers[] = {
1552 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1553 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1554 	};
1555 	struct skl_wrpll_context ctx = {
1556 		.min_deviation = U64_MAX,
1557 	};
1558 	unsigned int dco, d, i;
1559 	unsigned int p0, p1, p2;
1560 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1561 
1562 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1563 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1564 			for (i = 0; i < dividers[d].n_dividers; i++) {
1565 				unsigned int p = dividers[d].list[i];
1566 				u64 dco_freq = p * afe_clock;
1567 
1568 				skl_wrpll_try_divider(&ctx,
1569 						      dco_central_freq[dco],
1570 						      dco_freq,
1571 						      p);
1572 				/*
1573 				 * Skip the remaining dividers if we're sure to
1574 				 * have found the definitive divider, we can't
1575 				 * improve a 0 deviation.
1576 				 */
1577 				if (ctx.min_deviation == 0)
1578 					goto skip_remaining_dividers;
1579 			}
1580 		}
1581 
1582 skip_remaining_dividers:
1583 		/*
1584 		 * If a solution is found with an even divider, prefer
1585 		 * this one.
1586 		 */
1587 		if (d == 0 && ctx.p)
1588 			break;
1589 	}
1590 
1591 	if (!ctx.p)
1592 		return -EINVAL;
1593 
1594 	/*
1595 	 * gcc incorrectly analyses that these can be used without being
1596 	 * initialized. To be fair, it's hard to guess.
1597 	 */
1598 	p0 = p1 = p2 = 0;
1599 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1600 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1601 				  ctx.central_freq, p0, p1, p2);
1602 
1603 	return 0;
1604 }
1605 
/*
 * Compute the port clock produced by a WRPLL from its cfgcr1/cfgcr2
 * divider fields; the inverse of skl_ddi_hdmi_pll_dividers().
 * Returns 0 when a register field holds an unexpected value.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Q divider only applies when its mode bit is set. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/2^15) * ref clock. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock = DCO / (p0*p1*p2); port clock = AFE / 5. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1676 
/*
 * Compute the WRPLL dividers for an HDMI output and store the resulting
 * ctrl1/cfgcr1/cfgcr2 values in crtc_state->dpll_hw_state. port_clock is
 * then replaced by the exact frequency those dividers actually produce.
 * Returns 0 on success or a negative error if no dividers were found.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the calculator wants Hz. */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	/* Read back the exact achievable clock from the computed state. */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1716 
/*
 * Fill in the ctrl1 link rate field for a DP/eDP output. Unknown rates
 * intentionally leave only the override bit set (no default case).
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1753 
1754 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1755 				  const struct intel_shared_dpll *pll,
1756 				  const struct intel_dpll_hw_state *pll_state)
1757 {
1758 	int link_clock = 0;
1759 
1760 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1761 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1762 	case DPLL_CTRL1_LINK_RATE_810:
1763 		link_clock = 81000;
1764 		break;
1765 	case DPLL_CTRL1_LINK_RATE_1080:
1766 		link_clock = 108000;
1767 		break;
1768 	case DPLL_CTRL1_LINK_RATE_1350:
1769 		link_clock = 135000;
1770 		break;
1771 	case DPLL_CTRL1_LINK_RATE_1620:
1772 		link_clock = 162000;
1773 		break;
1774 	case DPLL_CTRL1_LINK_RATE_2160:
1775 		link_clock = 216000;
1776 		break;
1777 	case DPLL_CTRL1_LINK_RATE_2700:
1778 		link_clock = 270000;
1779 		break;
1780 	default:
1781 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1782 		break;
1783 	}
1784 
1785 	return link_clock * 2;
1786 }
1787 
1788 static int skl_compute_dpll(struct intel_atomic_state *state,
1789 			    struct intel_crtc *crtc,
1790 			    struct intel_encoder *encoder)
1791 {
1792 	struct intel_crtc_state *crtc_state =
1793 		intel_atomic_get_new_crtc_state(state, crtc);
1794 
1795 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1796 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1797 	else if (intel_crtc_has_dp_encoder(crtc_state))
1798 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1799 	else
1800 		return -EINVAL;
1801 }
1802 
1803 static int skl_get_dpll(struct intel_atomic_state *state,
1804 			struct intel_crtc *crtc,
1805 			struct intel_encoder *encoder)
1806 {
1807 	struct intel_crtc_state *crtc_state =
1808 		intel_atomic_get_new_crtc_state(state, crtc);
1809 	struct intel_shared_dpll *pll;
1810 
1811 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1812 		pll = intel_find_shared_dpll(state, crtc,
1813 					     &crtc_state->dpll_hw_state,
1814 					     BIT(DPLL_ID_SKL_DPLL0));
1815 	else
1816 		pll = intel_find_shared_dpll(state, crtc,
1817 					     &crtc_state->dpll_hw_state,
1818 					     BIT(DPLL_ID_SKL_DPLL3) |
1819 					     BIT(DPLL_ID_SKL_DPLL2) |
1820 					     BIT(DPLL_ID_SKL_DPLL1));
1821 	if (!pll)
1822 		return -EINVAL;
1823 
1824 	intel_reference_shared_dpll(state, crtc,
1825 				    pll, &crtc_state->dpll_hw_state);
1826 
1827 	crtc_state->shared_dpll = pll;
1828 
1829 	return 0;
1830 }
1831 
1832 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1833 				const struct intel_shared_dpll *pll,
1834 				const struct intel_dpll_hw_state *pll_state)
1835 {
1836 	/*
1837 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1838 	 * the internal shift for each field
1839 	 */
1840 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1841 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1842 	else
1843 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1844 }
1845 
/* SKL PLLs are referenced from the CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
1851 
/* Log the SKL PLL hw state (ctrl1/cfgcr1/cfgcr2) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1861 
/* Hooks for SKL DPLL 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1868 
/* Hooks for the always-on SKL DPLL 0 (also drives CDCLK). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1875 
/* All shared PLLs on SKL; DPLL 0 is always on since it drives CDCLK. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1883 
/* Shared DPLL manager for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1892 
/*
 * Program and enable a BXT/GLK port PLL. The register writes follow the
 * hardware's required sequence: reference select, (GLK) PHY power-up,
 * divider/coefficient programming, recalibration, enable + lock wait,
 * and finally the lane staggering setup.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		/* GLK needs the PLL power enabled before programming it. */
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_INT_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Second EBB_4 write applies the saved ebb4 bits on top. */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2013 
/* Disable a BXT/GLK port PLL and, on GLK, also power down the PLL. */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		/* Mirror of the power-up done in bxt_ddi_pll_enable(). */
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2036 
/*
 * Read back the current hardware state of @pll into @hw_state.
 * Returns true if the PLL is enabled, false if it is disabled or the
 * display core power domain could not be acquired.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	/*
	 * Each register is masked down to the fields this driver programs
	 * (see bxt_ddi_set_dpll_hw_state()) so that state comparison
	 * ignores the remaining bits.
	 */
	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2116 
/*
 * Pre-calculated divider values for the standard DP link rates,
 * looked up by .dot (== port_clock) in bxt_ddi_dp_pll_dividers().
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2128 
2129 static int
2130 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2131 			  struct dpll *clk_div)
2132 {
2133 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2134 
2135 	/* Calculate HDMI div */
2136 	/*
2137 	 * FIXME: tie the following calculation into
2138 	 * i9xx_crtc_compute_clock
2139 	 */
2140 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2141 		return -EINVAL;
2142 
2143 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2144 
2145 	return 0;
2146 }
2147 
2148 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2149 				    struct dpll *clk_div)
2150 {
2151 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2152 	int i;
2153 
2154 	*clk_div = bxt_dp_clk_val[0];
2155 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2156 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2157 			*clk_div = bxt_dp_clk_val[i];
2158 			break;
2159 		}
2160 	}
2161 
2162 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2163 
2164 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2165 		    clk_div->dot != crtc_state->port_clock);
2166 }
2167 
/*
 * Translate the computed dividers in @clk_div into the register values
 * stored in @crtc_state->dpll_hw_state, picking the PLL loop-filter
 * coefficients from the VCO frequency and the lane stagger delay from
 * the port clock. Returns 0 on success, -EINVAL for an out-of-range VCO.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/* Loop filter coefficients depend on the VCO frequency range. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane stagger delay scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	/* m2 is a .22 fixed point value: integer part above bit 22. */
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2235 
2236 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2237 				const struct intel_shared_dpll *pll,
2238 				const struct intel_dpll_hw_state *pll_state)
2239 {
2240 	struct dpll clock;
2241 
2242 	clock.m1 = 2;
2243 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2244 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2245 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2246 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2247 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2248 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2249 
2250 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2251 }
2252 
2253 static int
2254 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2255 {
2256 	struct dpll clk_div = {};
2257 
2258 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2259 
2260 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2261 }
2262 
2263 static int
2264 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2265 {
2266 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2267 	struct dpll clk_div = {};
2268 	int ret;
2269 
2270 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2271 
2272 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2273 	if (ret)
2274 		return ret;
2275 
2276 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2277 						      &crtc_state->dpll_hw_state);
2278 
2279 	return 0;
2280 }
2281 
2282 static int bxt_compute_dpll(struct intel_atomic_state *state,
2283 			    struct intel_crtc *crtc,
2284 			    struct intel_encoder *encoder)
2285 {
2286 	struct intel_crtc_state *crtc_state =
2287 		intel_atomic_get_new_crtc_state(state, crtc);
2288 
2289 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2290 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2291 	else if (intel_crtc_has_dp_encoder(crtc_state))
2292 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2293 	else
2294 		return -EINVAL;
2295 }
2296 
2297 static int bxt_get_dpll(struct intel_atomic_state *state,
2298 			struct intel_crtc *crtc,
2299 			struct intel_encoder *encoder)
2300 {
2301 	struct intel_crtc_state *crtc_state =
2302 		intel_atomic_get_new_crtc_state(state, crtc);
2303 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2304 	struct intel_shared_dpll *pll;
2305 	enum intel_dpll_id id;
2306 
2307 	/* 1:1 mapping between ports and PLLs */
2308 	id = (enum intel_dpll_id) encoder->port;
2309 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2310 
2311 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2312 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2313 
2314 	intel_reference_shared_dpll(state, crtc,
2315 				    pll, &crtc_state->dpll_hw_state);
2316 
2317 	crtc_state->shared_dpll = pll;
2318 
2319 	return 0;
2320 }
2321 
/* Record the PLL reference clocks: 100 MHz for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 100000;
	i915->display.dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2328 
2329 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2330 			      const struct intel_dpll_hw_state *hw_state)
2331 {
2332 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2333 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2334 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2335 		    hw_state->ebb0,
2336 		    hw_state->ebb4,
2337 		    hw_state->pll0,
2338 		    hw_state->pll1,
2339 		    hw_state->pll2,
2340 		    hw_state->pll3,
2341 		    hw_state->pll6,
2342 		    hw_state->pll8,
2343 		    hw_state->pll9,
2344 		    hw_state->pll10,
2345 		    hw_state->pcsdw12);
2346 }
2347 
/* Hooks shared by all BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2354 
/* One dedicated PLL per DDI port (A-C); terminated by an empty entry. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2361 
/* Top-level PLL management hooks for Broxton/Geminilake. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2370 
/*
 * Split an overall WRPLL divider into the P/Q/K factors the hardware
 * takes (bestdiv == pdiv * qdiv * kdiv). Only dividers from the table
 * in icl_calc_wrpll() are expected; other even values leave the
 * outputs untouched, matching the original behavior.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	/* odd dividers */
	if (bestdiv % 2 != 0) {
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2409 
2410 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2411 				      u32 dco_freq, u32 ref_freq,
2412 				      int pdiv, int qdiv, int kdiv)
2413 {
2414 	u32 dco;
2415 
2416 	switch (kdiv) {
2417 	case 1:
2418 		params->kdiv = 1;
2419 		break;
2420 	case 2:
2421 		params->kdiv = 2;
2422 		break;
2423 	case 3:
2424 		params->kdiv = 4;
2425 		break;
2426 	default:
2427 		WARN(1, "Incorrect KDiv\n");
2428 	}
2429 
2430 	switch (pdiv) {
2431 	case 2:
2432 		params->pdiv = 1;
2433 		break;
2434 	case 3:
2435 		params->pdiv = 2;
2436 		break;
2437 	case 5:
2438 		params->pdiv = 4;
2439 		break;
2440 	case 7:
2441 		params->pdiv = 8;
2442 		break;
2443 	default:
2444 		WARN(1, "Incorrect PDiv\n");
2445 	}
2446 
2447 	WARN_ON(kdiv != 2 && qdiv != 1);
2448 
2449 	params->qdiv_ratio = qdiv;
2450 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2451 
2452 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2453 
2454 	params->dco_integer = dco >> 15;
2455 	params->dco_fraction = dco & 0x7fff;
2456 }
2457 
2458 /*
2459  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2460  * Program half of the nominal DCO divider fraction value.
2461  */
2462 static bool
2463 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2464 {
2465 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2466 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2467 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2468 		 i915->display.dpll.ref_clks.nssc == 38400;
2469 }
2470 
/* Pairs a DP link rate (matched against port_clock) with its WRPLL params. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2475 
2476 /*
2477  * These values alrea already adjusted: they're the bits we write to the
2478  * registers, not the logical values.
2479  */
2480 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2481 	{ 540000,
2482 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2483 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2484 	{ 270000,
2485 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2486 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2487 	{ 162000,
2488 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2489 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2490 	{ 324000,
2491 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2492 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2493 	{ 216000,
2494 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2495 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2496 	{ 432000,
2497 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2498 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2499 	{ 648000,
2500 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2501 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2502 	{ 810000,
2503 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2504 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2505 };
2506 
2507 
/* Also used for 38.4 MHz values (see icl_wrpll_ref_clock()). */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2535 
/* TBT PLL parameters for a 24 MHz reference clock (ICL). */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2540 
/* TBT PLL parameters for 19.2/38.4 MHz reference clocks (ICL). */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2545 
/* TBT PLL parameters for 19.2/38.4 MHz reference clocks (TGL+). */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2551 
/* TBT PLL parameters for a 24 MHz reference clock (TGL+). */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2556 
2557 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2558 				 struct skl_wrpll_params *pll_params)
2559 {
2560 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2561 	const struct icl_combo_pll_params *params =
2562 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2563 		icl_dp_combo_pll_24MHz_values :
2564 		icl_dp_combo_pll_19_2MHz_values;
2565 	int clock = crtc_state->port_clock;
2566 	int i;
2567 
2568 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2569 		if (clock == params[i].clock) {
2570 			*pll_params = params[i].wrpll;
2571 			return 0;
2572 		}
2573 	}
2574 
2575 	MISSING_CASE(clock);
2576 	return -EINVAL;
2577 }
2578 
2579 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2580 			    struct skl_wrpll_params *pll_params)
2581 {
2582 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2583 
2584 	if (DISPLAY_VER(dev_priv) >= 12) {
2585 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2586 		default:
2587 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2588 			fallthrough;
2589 		case 19200:
2590 		case 38400:
2591 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2592 			break;
2593 		case 24000:
2594 			*pll_params = tgl_tbt_pll_24MHz_values;
2595 			break;
2596 		}
2597 	} else {
2598 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2599 		default:
2600 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2601 			fallthrough;
2602 		case 19200:
2603 		case 38400:
2604 			*pll_params = icl_tbt_pll_19_2MHz_values;
2605 			break;
2606 		case 24000:
2607 			*pll_params = icl_tbt_pll_24MHz_values;
2608 			break;
2609 		}
2610 	}
2611 
2612 	return 0;
2613 }
2614 
/* Frequency readout is not meaningful for the TBT PLL; always warns. */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2627 
2628 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2629 {
2630 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2631 
2632 	/*
2633 	 * For ICL+, the spec states: if reference frequency is 38.4,
2634 	 * use 19.2 because the DPLL automatically divides that by 2.
2635 	 */
2636 	if (ref_clock == 38400)
2637 		ref_clock = 19200;
2638 
2639 	return ref_clock;
2640 }
2641 
/*
 * Find the WRPLL divider that places the DCO frequency (afe_clock *
 * divider) closest to the middle of the allowed [7998, 10000] MHz
 * range, then expand it into P/Q/K multipliers and the DCO
 * integer/fraction register values. Returns 0 on success, -EINVAL if
 * no divider lands in range.
 */
static int
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	/* Even dividers first, then the valid odd ones (3,5,7,9,15,21). */
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);

			/* Prefer the DCO closest to the middle of the range. */
			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return -EINVAL;

	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return 0;
}
2685 
/*
 * Decode the combo PHY PLL registers in @pll_state back into the port
 * clock: dco_freq / (p0 * p1 * p2 * 5).
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* The Q divider only applies when qdiv mode is enabled. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Map the P divider register encoding to its logical value. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* Map the K divider register encoding to its logical value. */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the halved fraction programmed for WA #22010492432. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* Unrecognized register encodings would divide by zero below. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2746 
/*
 * Convert the computed WRPLL parameters into the CFGCR0/CFGCR1 (and,
 * with AFC override, DIV0) register values stored in @pll_state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/* WA #22010492432: program half the nominal DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* VBT may override the AFC startup value. */
	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
2772 
/*
 * Search for div1/div2 such that div1 * div2 * clock_khz * 5 lands
 * inside the allowed DCO range, then derive the refclkin/clktop2
 * register fields from them. On success fills @target_dco_khz and the
 * clktop2 fields of @state and returns 0; returns -EINVAL when no
 * divider pair fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP allows only the exact 8.1 GHz DCO; HDMI has a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Register encoding of the div1 (HS) divider. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2848 
2849 /*
2850  * The specification for this function uses real numbers, so the math had to be
2851  * adapted to integer-only calculation, that's why it looks so different.
2852  */
2853 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2854 				 struct intel_dpll_hw_state *pll_state)
2855 {
2856 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2857 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2858 	int clock = crtc_state->port_clock;
2859 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2860 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2861 	u32 prop_coeff, int_coeff;
2862 	u32 tdc_targetcnt, feedfwgain;
2863 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2864 	u64 tmp;
2865 	bool use_ssc = false;
2866 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2867 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2868 	int ret;
2869 
2870 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2871 				       pll_state, is_dkl);
2872 	if (ret)
2873 		return ret;
2874 
2875 	m1div = 2;
2876 	m2div_int = dco_khz / (refclk_khz * m1div);
2877 	if (m2div_int > 255) {
2878 		if (!is_dkl) {
2879 			m1div = 4;
2880 			m2div_int = dco_khz / (refclk_khz * m1div);
2881 		}
2882 
2883 		if (m2div_int > 255)
2884 			return -EINVAL;
2885 	}
2886 	m2div_rem = dco_khz % (refclk_khz * m1div);
2887 
2888 	tmp = (u64)m2div_rem * (1 << 22);
2889 	do_div(tmp, refclk_khz * m1div);
2890 	m2div_frac = tmp;
2891 
2892 	switch (refclk_khz) {
2893 	case 19200:
2894 		iref_ndiv = 1;
2895 		iref_trim = 28;
2896 		iref_pulse_w = 1;
2897 		break;
2898 	case 24000:
2899 		iref_ndiv = 1;
2900 		iref_trim = 25;
2901 		iref_pulse_w = 2;
2902 		break;
2903 	case 38400:
2904 		iref_ndiv = 2;
2905 		iref_trim = 28;
2906 		iref_pulse_w = 1;
2907 		break;
2908 	default:
2909 		MISSING_CASE(refclk_khz);
2910 		return -EINVAL;
2911 	}
2912 
2913 	/*
2914 	 * tdc_res = 0.000003
2915 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2916 	 *
2917 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2918 	 * was supposed to be a division, but we rearranged the operations of
2919 	 * the formula to avoid early divisions so we don't multiply the
2920 	 * rounding errors.
2921 	 *
2922 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2923 	 * we also rearrange to work with integers.
2924 	 *
2925 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2926 	 * last division by 10.
2927 	 */
2928 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2929 
2930 	/*
2931 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2932 	 * 32 bits. That's not a problem since we round the division down
2933 	 * anyway.
2934 	 */
2935 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2936 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2937 
2938 	if (dco_khz >= 9000000) {
2939 		prop_coeff = 5;
2940 		int_coeff = 10;
2941 	} else {
2942 		prop_coeff = 4;
2943 		int_coeff = 8;
2944 	}
2945 
2946 	if (use_ssc) {
2947 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2948 		do_div(tmp, refclk_khz * m1div * 10000);
2949 		ssc_stepsize = tmp;
2950 
2951 		tmp = mul_u32_u32(dco_khz, 1000);
2952 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2953 	} else {
2954 		ssc_stepsize = 0;
2955 		ssc_steplen = 0;
2956 	}
2957 	ssc_steplog = 4;
2958 
2959 	/* write pll_state calculations */
2960 	if (is_dkl) {
2961 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2962 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2963 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2964 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2965 		if (dev_priv->display.vbt.override_afc_startup) {
2966 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2967 
2968 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2969 		}
2970 
2971 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2972 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2973 
2974 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2975 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2976 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2977 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2978 
2979 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2980 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2981 
2982 		pll_state->mg_pll_tdc_coldst_bias =
2983 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2984 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2985 
2986 	} else {
2987 		pll_state->mg_pll_div0 =
2988 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2989 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2990 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2991 
2992 		pll_state->mg_pll_div1 =
2993 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2994 			MG_PLL_DIV1_DITHER_DIV_2 |
2995 			MG_PLL_DIV1_NDIVRATIO(1) |
2996 			MG_PLL_DIV1_FBPREDIV(m1div);
2997 
2998 		pll_state->mg_pll_lf =
2999 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3000 			MG_PLL_LF_AFCCNTSEL_512 |
3001 			MG_PLL_LF_GAINCTRL(1) |
3002 			MG_PLL_LF_INT_COEFF(int_coeff) |
3003 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3004 
3005 		pll_state->mg_pll_frac_lock =
3006 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3007 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3008 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3009 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3010 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3011 		if (use_ssc || m2div_rem > 0)
3012 			pll_state->mg_pll_frac_lock |=
3013 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3014 
3015 		pll_state->mg_pll_ssc =
3016 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3017 			MG_PLL_SSC_TYPE(2) |
3018 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3019 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3020 			MG_PLL_SSC_FLLEN |
3021 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3022 
3023 		pll_state->mg_pll_tdc_coldst_bias =
3024 			MG_PLL_TDC_COLDST_COLDSTART |
3025 			MG_PLL_TDC_COLDST_IREFINT_EN |
3026 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3027 			MG_PLL_TDC_TDCOVCCORR_EN |
3028 			MG_PLL_TDC_TDCSEL(3);
3029 
3030 		pll_state->mg_pll_bias =
3031 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3032 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3033 			MG_PLL_BIAS_BIAS_BONUS(10) |
3034 			MG_PLL_BIAS_BIASCAL_EN |
3035 			MG_PLL_BIAS_CTRIM(12) |
3036 			MG_PLL_BIAS_VREF_RDAC(4) |
3037 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3038 
3039 		if (refclk_khz == 38400) {
3040 			pll_state->mg_pll_tdc_coldst_bias_mask =
3041 				MG_PLL_TDC_COLDST_COLDSTART;
3042 			pll_state->mg_pll_bias_mask = 0;
3043 		} else {
3044 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3045 			pll_state->mg_pll_bias_mask = -1U;
3046 		}
3047 
3048 		pll_state->mg_pll_tdc_coldst_bias &=
3049 			pll_state->mg_pll_tdc_coldst_bias_mask;
3050 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3051 	}
3052 
3053 	return 0;
3054 }
3055 
/*
 * Compute the output frequency (in kHz) of an MG (ICL) or Dekel (TGL+)
 * Type-C PHY PLL from its saved register state in @pll_state.
 *
 * Returns the port clock in kHz, or 0 if the HSDIV ratio read back from
 * the hardware is not one of the valid encodings.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->display.dpll.ref_clks.nssc;

	/*
	 * The feedback dividers live in different registers/fields on the
	 * Dekel (display ver >= 12) vs. the MG PHY.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* The fractional divider only contributes when enabled. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the HSDIV post-divider; only these four ratios are valid. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3127 
3128 /**
3129  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3130  * @crtc_state: state for the CRTC to select the DPLL for
3131  * @port_dpll_id: the active @port_dpll_id to select
3132  *
3133  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3134  * CRTC.
3135  */
3136 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3137 			      enum icl_port_dpll_id port_dpll_id)
3138 {
3139 	struct icl_port_dpll *port_dpll =
3140 		&crtc_state->icl_port_dplls[port_dpll_id];
3141 
3142 	crtc_state->shared_dpll = port_dpll->pll;
3143 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3144 }
3145 
3146 static void icl_update_active_dpll(struct intel_atomic_state *state,
3147 				   struct intel_crtc *crtc,
3148 				   struct intel_encoder *encoder)
3149 {
3150 	struct intel_crtc_state *crtc_state =
3151 		intel_atomic_get_new_crtc_state(state, crtc);
3152 	struct intel_digital_port *primary_port;
3153 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3154 
3155 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3156 		enc_to_mst(encoder)->primary :
3157 		enc_to_dig_port(encoder);
3158 
3159 	if (primary_port &&
3160 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3161 	     intel_tc_port_in_legacy_mode(primary_port)))
3162 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3163 
3164 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3165 }
3166 
3167 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3168 {
3169 	if (!(i915->hti_state & HDPORT_ENABLED))
3170 		return 0;
3171 
3172 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3173 }
3174 
3175 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3176 				      struct intel_crtc *crtc)
3177 {
3178 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3179 	struct intel_crtc_state *crtc_state =
3180 		intel_atomic_get_new_crtc_state(state, crtc);
3181 	struct icl_port_dpll *port_dpll =
3182 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3183 	struct skl_wrpll_params pll_params = {};
3184 	int ret;
3185 
3186 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3187 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3188 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3189 	else
3190 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3191 
3192 	if (ret)
3193 		return ret;
3194 
3195 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3196 
3197 	/* this is mainly for the fastset check */
3198 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3199 
3200 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3201 							    &port_dpll->hw_state);
3202 
3203 	return 0;
3204 }
3205 
3206 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3207 				  struct intel_crtc *crtc,
3208 				  struct intel_encoder *encoder)
3209 {
3210 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3211 	struct intel_crtc_state *crtc_state =
3212 		intel_atomic_get_new_crtc_state(state, crtc);
3213 	struct icl_port_dpll *port_dpll =
3214 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3215 	enum port port = encoder->port;
3216 	unsigned long dpll_mask;
3217 
3218 	if (IS_ALDERLAKE_S(dev_priv)) {
3219 		dpll_mask =
3220 			BIT(DPLL_ID_DG1_DPLL3) |
3221 			BIT(DPLL_ID_DG1_DPLL2) |
3222 			BIT(DPLL_ID_ICL_DPLL1) |
3223 			BIT(DPLL_ID_ICL_DPLL0);
3224 	} else if (IS_DG1(dev_priv)) {
3225 		if (port == PORT_D || port == PORT_E) {
3226 			dpll_mask =
3227 				BIT(DPLL_ID_DG1_DPLL2) |
3228 				BIT(DPLL_ID_DG1_DPLL3);
3229 		} else {
3230 			dpll_mask =
3231 				BIT(DPLL_ID_DG1_DPLL0) |
3232 				BIT(DPLL_ID_DG1_DPLL1);
3233 		}
3234 	} else if (IS_ROCKETLAKE(dev_priv)) {
3235 		dpll_mask =
3236 			BIT(DPLL_ID_EHL_DPLL4) |
3237 			BIT(DPLL_ID_ICL_DPLL1) |
3238 			BIT(DPLL_ID_ICL_DPLL0);
3239 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3240 		dpll_mask =
3241 			BIT(DPLL_ID_EHL_DPLL4) |
3242 			BIT(DPLL_ID_ICL_DPLL1) |
3243 			BIT(DPLL_ID_ICL_DPLL0);
3244 	} else {
3245 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3246 	}
3247 
3248 	/* Eliminate DPLLs from consideration if reserved by HTI */
3249 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3250 
3251 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3252 						&port_dpll->hw_state,
3253 						dpll_mask);
3254 	if (!port_dpll->pll)
3255 		return -EINVAL;
3256 
3257 	intel_reference_shared_dpll(state, crtc,
3258 				    port_dpll->pll, &port_dpll->hw_state);
3259 
3260 	icl_update_active_dpll(state, crtc, encoder);
3261 
3262 	return 0;
3263 }
3264 
3265 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3266 				    struct intel_crtc *crtc)
3267 {
3268 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3269 	struct intel_crtc_state *crtc_state =
3270 		intel_atomic_get_new_crtc_state(state, crtc);
3271 	struct icl_port_dpll *port_dpll =
3272 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3273 	struct skl_wrpll_params pll_params = {};
3274 	int ret;
3275 
3276 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3277 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3278 	if (ret)
3279 		return ret;
3280 
3281 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3282 
3283 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3284 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3285 	if (ret)
3286 		return ret;
3287 
3288 	/* this is mainly for the fastset check */
3289 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3290 
3291 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3292 							 &port_dpll->hw_state);
3293 
3294 	return 0;
3295 }
3296 
3297 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3298 				struct intel_crtc *crtc,
3299 				struct intel_encoder *encoder)
3300 {
3301 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3302 	struct intel_crtc_state *crtc_state =
3303 		intel_atomic_get_new_crtc_state(state, crtc);
3304 	struct icl_port_dpll *port_dpll =
3305 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3306 	enum intel_dpll_id dpll_id;
3307 	int ret;
3308 
3309 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3310 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3311 						&port_dpll->hw_state,
3312 						BIT(DPLL_ID_ICL_TBTPLL));
3313 	if (!port_dpll->pll)
3314 		return -EINVAL;
3315 	intel_reference_shared_dpll(state, crtc,
3316 				    port_dpll->pll, &port_dpll->hw_state);
3317 
3318 
3319 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3320 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3321 							 encoder->port));
3322 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3323 						&port_dpll->hw_state,
3324 						BIT(dpll_id));
3325 	if (!port_dpll->pll) {
3326 		ret = -EINVAL;
3327 		goto err_unreference_tbt_pll;
3328 	}
3329 	intel_reference_shared_dpll(state, crtc,
3330 				    port_dpll->pll, &port_dpll->hw_state);
3331 
3332 	icl_update_active_dpll(state, crtc, encoder);
3333 
3334 	return 0;
3335 
3336 err_unreference_tbt_pll:
3337 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3338 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3339 
3340 	return ret;
3341 }
3342 
3343 static int icl_compute_dplls(struct intel_atomic_state *state,
3344 			     struct intel_crtc *crtc,
3345 			     struct intel_encoder *encoder)
3346 {
3347 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3348 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3349 
3350 	if (intel_phy_is_combo(dev_priv, phy))
3351 		return icl_compute_combo_phy_dpll(state, crtc);
3352 	else if (intel_phy_is_tc(dev_priv, phy))
3353 		return icl_compute_tc_phy_dplls(state, crtc);
3354 
3355 	MISSING_CASE(phy);
3356 
3357 	return 0;
3358 }
3359 
3360 static int icl_get_dplls(struct intel_atomic_state *state,
3361 			 struct intel_crtc *crtc,
3362 			 struct intel_encoder *encoder)
3363 {
3364 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3365 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3366 
3367 	if (intel_phy_is_combo(dev_priv, phy))
3368 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3369 	else if (intel_phy_is_tc(dev_priv, phy))
3370 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3371 
3372 	MISSING_CASE(phy);
3373 
3374 	return -EINVAL;
3375 }
3376 
3377 static void icl_put_dplls(struct intel_atomic_state *state,
3378 			  struct intel_crtc *crtc)
3379 {
3380 	const struct intel_crtc_state *old_crtc_state =
3381 		intel_atomic_get_old_crtc_state(state, crtc);
3382 	struct intel_crtc_state *new_crtc_state =
3383 		intel_atomic_get_new_crtc_state(state, crtc);
3384 	enum icl_port_dpll_id id;
3385 
3386 	new_crtc_state->shared_dpll = NULL;
3387 
3388 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3389 		const struct icl_port_dpll *old_port_dpll =
3390 			&old_crtc_state->icl_port_dplls[id];
3391 		struct icl_port_dpll *new_port_dpll =
3392 			&new_crtc_state->icl_port_dplls[id];
3393 
3394 		new_port_dpll->pll = NULL;
3395 
3396 		if (!old_port_dpll->pll)
3397 			continue;
3398 
3399 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3400 	}
3401 }
3402 
/*
 * Read out the hardware state of an MG PHY PLL (ICL TypeC) into @hw_state.
 *
 * Returns true if the PLL is enabled and the state was read out, false if
 * the display power domain or the PLL itself is disabled. Reserved register
 * bits are masked off so later state comparisons only cover the bits that
 * are actually programmed.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* Bail out (returning "disabled") if the power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* Mask each register down to the fields the driver programs. */
	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * With a 38.4 MHz refclk only the COLDSTART bit of the TDC register
	 * is tracked; this mirrors the masks set up by icl_calc_mg_pll_state().
	 */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3468 
/*
 * Read out the hardware state of a Dekel PHY PLL (TGL+ TypeC) into
 * @hw_state.
 *
 * Returns true if the PLL is enabled and the state was read out, false if
 * the display power domain or the PLL itself is disabled. Each register is
 * masked down to the fields the driver programs so later state comparisons
 * ignore reserved bits.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Bail out (returning "disabled") if the power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* The AFC startup field is only tracked when the VBT overrides it. */
	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3539 
/*
 * Read out the CFGCR0/CFGCR1 (and, on TGL+ with a VBT AFC override, DIV0)
 * state of a combo/TBT PLL via @enable_reg. The CFGCR register location
 * varies per platform, hence the platform ladder below.
 *
 * Returns true if the PLL is enabled and the state was read out, false if
 * the display power domain or the PLL itself is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Bail out (returning "disabled") if the power domain is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only tracked when the VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3598 
3599 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3600 				   struct intel_shared_dpll *pll,
3601 				   struct intel_dpll_hw_state *hw_state)
3602 {
3603 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3604 
3605 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3606 }
3607 
/* TBT PLL readout: same common path, but with the fixed TBT_PLL_ENABLE register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3614 
/*
 * Program a combo/TBT PLL's CFGCR0/CFGCR1 (and, on TGL+ with a VBT AFC
 * override, the AFC startup field of DIV0) from the PLL's software state.
 * The CFGCR register location varies per platform, mirroring the readout
 * in icl_pll_get_hw_state().
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC override without a DIV0 register indicates a platform bug. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
			     hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3655 
/*
 * Program an MG PHY PLL (ICL TypeC) from the PLL's software state. Write
 * order and posting read follow the hardware programming sequence.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are written in full, no RMW needed. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC masks depend on the refclk, see icl_calc_mg_pll_state(). */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3706 
/*
 * Program a Dekel PHY PLL (TGL+ TypeC) from the PLL's software state.
 * Every register is updated read-modify-write so reserved bits are
 * preserved; the final posting read flushes the sequence.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only written when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3771 
3772 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3773 				 struct intel_shared_dpll *pll,
3774 				 i915_reg_t enable_reg)
3775 {
3776 	u32 val;
3777 
3778 	val = intel_de_read(dev_priv, enable_reg);
3779 	val |= PLL_POWER_ENABLE;
3780 	intel_de_write(dev_priv, enable_reg, val);
3781 
3782 	/*
3783 	 * The spec says we need to "wait" but it also says it should be
3784 	 * immediate.
3785 	 */
3786 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3787 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3788 			pll->info->id);
3789 }
3790 
3791 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3792 			   struct intel_shared_dpll *pll,
3793 			   i915_reg_t enable_reg)
3794 {
3795 	u32 val;
3796 
3797 	val = intel_de_read(dev_priv, enable_reg);
3798 	val |= PLL_ENABLE;
3799 	intel_de_write(dev_priv, enable_reg, val);
3800 
3801 	/* Timeout is actually 600us. */
3802 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3803 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3804 }
3805 
/*
 * Disable CMTG clock gating on ADL-P A0 steppings; called from
 * combo_pll_enable() right after DPLL0 is enabled, since TRANS_CMTG_CHICKEN
 * is only accessible with DPLL0 on.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only applies to ADL-P A0 and only to DPLL0. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN); /* double read, see above */
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3830 
/*
 * Enable a combo PHY PLL: power it up, program its state, enable it and
 * apply the ADL-P CMTG workaround. On JSL/EHL DPLL4 additionally holds a
 * DC_OFF power reference for as long as the PLL is enabled.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Program the PLL state before enabling it. */
	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* Needs DPLL0 enabled, so it must run here; see the workaround. */
	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3864 
/*
 * Enable the TBT PLL: power it up, program its state via the common combo
 * path, then enable it through the fixed TBT_PLL_ENABLE register.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3882 
/*
 * Enable a Type-C port PLL: power up, program the PHY-specific
 * configuration, enable.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Display 12+ programs Dekel PHY registers, earlier the MG PHY ones. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3905 
3906 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3907 			    struct intel_shared_dpll *pll,
3908 			    i915_reg_t enable_reg)
3909 {
3910 	u32 val;
3911 
3912 	/* The first steps are done by intel_ddi_post_disable(). */
3913 
3914 	/*
3915 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3916 	 * paths should already be setting the appropriate voltage, hence we do
3917 	 * nothing here.
3918 	 */
3919 
3920 	val = intel_de_read(dev_priv, enable_reg);
3921 	val &= ~PLL_ENABLE;
3922 	intel_de_write(dev_priv, enable_reg, val);
3923 
3924 	/* Timeout is actually 1us. */
3925 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3926 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3927 
3928 	/* DVFS post sequence would be here. See the comment above. */
3929 
3930 	val = intel_de_read(dev_priv, enable_reg);
3931 	val &= ~PLL_POWER_ENABLE;
3932 	intel_de_write(dev_priv, enable_reg, val);
3933 
3934 	/*
3935 	 * The spec says we need to "wait" but it also says it should be
3936 	 * immediate.
3937 	 */
3938 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3939 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3940 			pll->info->id);
3941 }
3942 
3943 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3944 			      struct intel_shared_dpll *pll)
3945 {
3946 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3947 
3948 	icl_pll_disable(dev_priv, pll, enable_reg);
3949 
3950 	if (IS_JSL_EHL(dev_priv) &&
3951 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3952 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3953 					pll->wakeref);
3954 }
3955 
/* Disable the Thunderbolt PLL via the common ICL+ disable sequence. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3961 
/* Disable a Type-C port PLL via the common ICL+ disable sequence. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3969 
/* ICL+ DPLLs use the (non-SSC) cdclk reference clock. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3975 
3976 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3977 			      const struct intel_dpll_hw_state *hw_state)
3978 {
3979 	drm_dbg_kms(&dev_priv->drm,
3980 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3981 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3982 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3983 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3984 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3985 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3986 		    hw_state->cfgcr0, hw_state->cfgcr1,
3987 		    hw_state->div0,
3988 		    hw_state->mg_refclkin_ctl,
3989 		    hw_state->mg_clktop2_coreclkctl1,
3990 		    hw_state->mg_clktop2_hsclkctl,
3991 		    hw_state->mg_pll_div0,
3992 		    hw_state->mg_pll_div1,
3993 		    hw_state->mg_pll_lf,
3994 		    hw_state->mg_pll_frac_lock,
3995 		    hw_state->mg_pll_ssc,
3996 		    hw_state->mg_pll_bias,
3997 		    hw_state->mg_pll_tdc_coldst_bias);
3998 }
3999 
/* Callbacks for combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Callbacks for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Callbacks for MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4020 
/* Shared DPLLs on Ice Lake: two combo PLLs, TBT PLL and four MG PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4041 
/* Shared DPLLs on Jasper Lake / Elkhart Lake: combo PLLs only. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4057 
/* Callbacks for Dekel PHY (Type-C) PLLs; enable path shared with MG. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4064 
/* Shared DPLLs on Tiger Lake: combo PLLs, TBT PLL and six TC PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4087 
/* Shared DPLLs on Rocket Lake: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4103 
/* Shared DPLLs on DG1: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4120 
/* Shared DPLLs on Alder Lake S: four combo PLLs. */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4137 
/* Shared DPLLs on Alder Lake P: combo PLLs, TBT PLL and four TC PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4158 
4159 /**
4160  * intel_shared_dpll_init - Initialize shared DPLLs
4161  * @dev_priv: i915 device
4162  *
4163  * Initialize shared DPLLs for @dev_priv.
4164  */
4165 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4166 {
4167 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4168 	const struct dpll_info *dpll_info;
4169 	int i;
4170 
4171 	mutex_init(&dev_priv->display.dpll.lock);
4172 
4173 	if (IS_DG2(dev_priv))
4174 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4175 		dpll_mgr = NULL;
4176 	else if (IS_ALDERLAKE_P(dev_priv))
4177 		dpll_mgr = &adlp_pll_mgr;
4178 	else if (IS_ALDERLAKE_S(dev_priv))
4179 		dpll_mgr = &adls_pll_mgr;
4180 	else if (IS_DG1(dev_priv))
4181 		dpll_mgr = &dg1_pll_mgr;
4182 	else if (IS_ROCKETLAKE(dev_priv))
4183 		dpll_mgr = &rkl_pll_mgr;
4184 	else if (DISPLAY_VER(dev_priv) >= 12)
4185 		dpll_mgr = &tgl_pll_mgr;
4186 	else if (IS_JSL_EHL(dev_priv))
4187 		dpll_mgr = &ehl_pll_mgr;
4188 	else if (DISPLAY_VER(dev_priv) >= 11)
4189 		dpll_mgr = &icl_pll_mgr;
4190 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4191 		dpll_mgr = &bxt_pll_mgr;
4192 	else if (DISPLAY_VER(dev_priv) == 9)
4193 		dpll_mgr = &skl_pll_mgr;
4194 	else if (HAS_DDI(dev_priv))
4195 		dpll_mgr = &hsw_pll_mgr;
4196 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4197 		dpll_mgr = &pch_pll_mgr;
4198 
4199 	if (!dpll_mgr) {
4200 		dev_priv->display.dpll.num_shared_dpll = 0;
4201 		return;
4202 	}
4203 
4204 	dpll_info = dpll_mgr->dpll_info;
4205 
4206 	for (i = 0; dpll_info[i].name; i++) {
4207 		if (drm_WARN_ON(&dev_priv->drm,
4208 				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4209 			break;
4210 
4211 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4212 		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4213 	}
4214 
4215 	dev_priv->display.dpll.mgr = dpll_mgr;
4216 	dev_priv->display.dpll.num_shared_dpll = i;
4217 }
4218 
4219 /**
4220  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4221  * @state: atomic state
4222  * @crtc: CRTC to compute DPLLs for
4223  * @encoder: encoder
4224  *
4225  * This function computes the DPLL state for the given CRTC and encoder.
4226  *
4227  * The new configuration in the atomic commit @state is made effective by
4228  * calling intel_shared_dpll_swap_state().
4229  *
4230  * Returns:
4231  * 0 on success, negative error code on falure.
4232  */
4233 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4234 			       struct intel_crtc *crtc,
4235 			       struct intel_encoder *encoder)
4236 {
4237 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4238 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4239 
4240 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4241 		return -EINVAL;
4242 
4243 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4244 }
4245 
4246 /**
4247  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4248  * @state: atomic state
4249  * @crtc: CRTC to reserve DPLLs for
4250  * @encoder: encoder
4251  *
4252  * This function reserves all required DPLLs for the given CRTC and encoder
4253  * combination in the current atomic commit @state and the new @crtc atomic
4254  * state.
4255  *
4256  * The new configuration in the atomic commit @state is made effective by
4257  * calling intel_shared_dpll_swap_state().
4258  *
4259  * The reserved DPLLs should be released by calling
4260  * intel_release_shared_dplls().
4261  *
4262  * Returns:
4263  * 0 if all required DPLLs were successfully reserved,
4264  * negative error code otherwise.
4265  */
4266 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4267 			       struct intel_crtc *crtc,
4268 			       struct intel_encoder *encoder)
4269 {
4270 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4271 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4272 
4273 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4274 		return -EINVAL;
4275 
4276 	return dpll_mgr->get_dplls(state, crtc, encoder);
4277 }
4278 
4279 /**
4280  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4281  * @state: atomic state
4282  * @crtc: crtc from which the DPLLs are to be released
4283  *
4284  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4285  * from the current atomic commit @state and the old @crtc atomic state.
4286  *
4287  * The new configuration in the atomic commit @state is made effective by
4288  * calling intel_shared_dpll_swap_state().
4289  */
4290 void intel_release_shared_dplls(struct intel_atomic_state *state,
4291 				struct intel_crtc *crtc)
4292 {
4293 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4294 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4295 
4296 	/*
4297 	 * FIXME: this function is called for every platform having a
4298 	 * compute_clock hook, even though the platform doesn't yet support
4299 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4300 	 * called on those.
4301 	 */
4302 	if (!dpll_mgr)
4303 		return;
4304 
4305 	dpll_mgr->put_dplls(state, crtc);
4306 }
4307 
4308 /**
4309  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4310  * @state: atomic state
4311  * @crtc: the CRTC for which to update the active DPLL
4312  * @encoder: encoder determining the type of port DPLL
4313  *
4314  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4315  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4316  * DPLL selected will be based on the current mode of the encoder's port.
4317  */
4318 void intel_update_active_dpll(struct intel_atomic_state *state,
4319 			      struct intel_crtc *crtc,
4320 			      struct intel_encoder *encoder)
4321 {
4322 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4323 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4324 
4325 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4326 		return;
4327 
4328 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4329 }
4330 
4331 /**
4332  * intel_dpll_get_freq - calculate the DPLL's output frequency
4333  * @i915: i915 device
4334  * @pll: DPLL for which to calculate the output frequency
4335  * @pll_state: DPLL state from which to calculate the output frequency
4336  *
4337  * Return the output frequency corresponding to @pll's passed in @pll_state.
4338  */
4339 int intel_dpll_get_freq(struct drm_i915_private *i915,
4340 			const struct intel_shared_dpll *pll,
4341 			const struct intel_dpll_hw_state *pll_state)
4342 {
4343 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4344 		return 0;
4345 
4346 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4347 }
4348 
4349 /**
4350  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4351  * @i915: i915 device
4352  * @pll: DPLL for which to calculate the output frequency
4353  * @hw_state: DPLL's hardware state
4354  *
4355  * Read out @pll's hardware state into @hw_state.
4356  */
4357 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4358 			     struct intel_shared_dpll *pll,
4359 			     struct intel_dpll_hw_state *hw_state)
4360 {
4361 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4362 }
4363 
/*
 * Read out the hw state of a single PLL and rebuild the sw tracking
 * (on/off, pipe mask, active mask) from the current CRTC states.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * Mirror combo_pll_enable(): an enabled DPLL4 on JSL/EHL must hold
	 * a DC-off power domain reference.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Rebuild the mask of pipes currently driven by this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4391 
/* Refresh the DPLL reference clocks, if the platform manager provides it. */
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
}
4397 
/* Read out the hw state of all shared DPLLs on this platform. */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
4405 
/*
 * Sanitize a PLL left enabled by firmware: reapply the clock gating
 * workaround and, if no pipe uses the PLL, disable it.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Applied for any enabled PLL; see adlp_cmtg_clock_gating_wa(). */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	/* PLL is on but no active pipe references it. */
	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4424 
/* Sanitize the state of all shared DPLLs after hw state readout. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
4432 
4433 /**
4434  * intel_dpll_dump_hw_state - write hw_state to dmesg
4435  * @dev_priv: i915 drm device
4436  * @hw_state: hw state to be written to the log
4437  *
4438  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4439  */
4440 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4441 			      const struct intel_dpll_hw_state *hw_state)
4442 {
4443 	if (dev_priv->display.dpll.mgr) {
4444 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4445 	} else {
4446 		/* fallback for platforms that don't use the shared dpll
4447 		 * infrastructure
4448 		 */
4449 		drm_dbg_kms(&dev_priv->drm,
4450 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4451 			    "fp0: 0x%x, fp1: 0x%x\n",
4452 			    hw_state->dpll,
4453 			    hw_state->dpll_md,
4454 			    hw_state->fp0,
4455 			    hw_state->fp1);
4456 	}
4457 }
4458 
/*
 * Cross-check the sw tracking of @pll against its current hw state.
 * With @crtc == NULL only the global active/reference masks are
 * verified; otherwise the masks are checked against @crtc and
 * @new_crtc_state.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active consistency checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	/* Global check only: active pipes must be a subset of references. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active CRTC must be in the PLL's active mask, and vice versa. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* The tracked hw state must match what was just read back. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4513 
4514 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4515 				    struct intel_crtc_state *old_crtc_state,
4516 				    struct intel_crtc_state *new_crtc_state)
4517 {
4518 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4519 
4520 	if (new_crtc_state->shared_dpll)
4521 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4522 					 crtc, new_crtc_state);
4523 
4524 	if (old_crtc_state->shared_dpll &&
4525 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4526 		u8 pipe_mask = BIT(crtc->pipe);
4527 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4528 
4529 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4530 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4531 				pipe_name(crtc->pipe), pll->active_mask);
4532 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
4533 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4534 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4535 	}
4536 }
4537 
/* Verify the global (no-CRTC) consistency of every shared DPLL. */
void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
					 NULL, NULL);
}
4546