1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "i915_reg.h"
27 #include "intel_de.h"
28 #include "intel_display_types.h"
29 #include "intel_dkl_phy.h"
30 #include "intel_dkl_phy_regs.h"
31 #include "intel_dpio_phy.h"
32 #include "intel_dpll.h"
33 #include "intel_dpll_mgr.h"
34 #include "intel_hti.h"
35 #include "intel_mg_phy_regs.h"
36 #include "intel_pch_refclk.h"
37 #include "intel_tc.h"
38 
39 /**
40  * DOC: Display PLLs
41  *
42  * Display PLLs used for driving outputs vary by platform. While some have
43  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
44  * from a pool. In the latter scenario, it is possible that multiple pipes
45  * share a PLL if their configurations match.
46  *
47  * This file provides an abstraction over display PLLs. The function
48  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
49  * users of a PLL are tracked and that tracking is integrated with the atomic
50  * modeset interface. During an atomic operation, required PLLs can be reserved
51  * for a given CRTC and encoder configuration by calling
52  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
53  * with intel_release_shared_dplls().
54  * Changes to the users are first staged in the atomic state, and then made
55  * effective by calling intel_shared_dpll_swap_state() during the atomic
56  * commit phase.
57  */
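
/*
 * A rough sketch of how the calls above fit together (illustrative only;
 * the exact prototypes live in intel_dpll_mgr.h):
 *
 *   atomic check:
 *     intel_reserve_shared_dplls()   - stage a PLL for a CRTC/encoder config
 *     intel_release_shared_dplls()   - drop a previously staged reservation
 *   atomic commit:
 *     intel_shared_dpll_swap_state() - make the staged assignments current
 *     intel_enable_shared_dpll()     - enable the PLL for an active pipe
 *     intel_disable_shared_dpll()    - disable it once no pipe uses it
 */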
58 
59 /* platform specific hooks for managing DPLLs */
60 struct intel_shared_dpll_funcs {
61 	/*
62 	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
63 	 * the pll is not already enabled.
64 	 */
65 	void (*enable)(struct drm_i915_private *i915,
66 		       struct intel_shared_dpll *pll);
67 
68 	/*
69 	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
70 	 * only when it is safe to disable the pll, i.e., there are no more
71 	 * tracked users for it.
72 	 */
73 	void (*disable)(struct drm_i915_private *i915,
74 			struct intel_shared_dpll *pll);
75 
76 	/*
77 	 * Hook for reading the values currently programmed to the DPLL
78 	 * registers. This is used for initial hw state readout and state
79 	 * verification after a mode set.
80 	 */
81 	bool (*get_hw_state)(struct drm_i915_private *i915,
82 			     struct intel_shared_dpll *pll,
83 			     struct intel_dpll_hw_state *hw_state);
84 
85 	/*
86 	 * Hook for calculating the pll's output frequency based on its passed
87 	 * in state.
88 	 */
89 	int (*get_freq)(struct drm_i915_private *i915,
90 			const struct intel_shared_dpll *pll,
91 			const struct intel_dpll_hw_state *pll_state);
92 };
93 
94 struct intel_dpll_mgr {
95 	const struct dpll_info *dpll_info;
96 
97 	int (*compute_dplls)(struct intel_atomic_state *state,
98 			     struct intel_crtc *crtc,
99 			     struct intel_encoder *encoder);
100 	int (*get_dplls)(struct intel_atomic_state *state,
101 			 struct intel_crtc *crtc,
102 			 struct intel_encoder *encoder);
103 	void (*put_dplls)(struct intel_atomic_state *state,
104 			  struct intel_crtc *crtc);
105 	void (*update_active_dpll)(struct intel_atomic_state *state,
106 				   struct intel_crtc *crtc,
107 				   struct intel_encoder *encoder);
108 	void (*update_ref_clks)(struct drm_i915_private *i915);
109 	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
110 			      const struct intel_dpll_hw_state *hw_state);
111 };
112 
113 static void
114 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
115 				  struct intel_shared_dpll_state *shared_dpll)
116 {
117 	enum intel_dpll_id i;
118 
119 	/* Copy shared dpll state */
120 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
121 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
122 
123 		shared_dpll[i] = pll->state;
124 	}
125 }
126 
127 static struct intel_shared_dpll_state *
128 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
129 {
130 	struct intel_atomic_state *state = to_intel_atomic_state(s);
131 
132 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
133 
134 	if (!state->dpll_set) {
135 		state->dpll_set = true;
136 
137 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
138 						  state->shared_dpll);
139 	}
140 
141 	return state->shared_dpll;
142 }
143 
144 /**
145  * intel_get_shared_dpll_by_id - get a DPLL given its id
146  * @dev_priv: i915 device instance
147  * @id: pll id
148  *
149  * Returns:
150  * A pointer to the DPLL with @id
151  */
152 struct intel_shared_dpll *
153 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
154 			    enum intel_dpll_id id)
155 {
156 	return &dev_priv->display.dpll.shared_dplls[id];
157 }
158 
159 /* For ILK+ */
160 void assert_shared_dpll(struct drm_i915_private *dev_priv,
161 			struct intel_shared_dpll *pll,
162 			bool state)
163 {
164 	bool cur_state;
165 	struct intel_dpll_hw_state hw_state;
166 
167 	if (drm_WARN(&dev_priv->drm, !pll,
168 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
169 		return;
170 
171 	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
172 	I915_STATE_WARN(cur_state != state,
173 	     "%s assertion failure (expected %s, current %s)\n",
174 			pll->info->name, str_on_off(state),
175 			str_on_off(cur_state));
176 }
177 
178 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
179 {
180 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
181 }
182 
183 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
184 {
185 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
186 }
187 
188 static i915_reg_t
189 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
190 			   struct intel_shared_dpll *pll)
191 {
192 	if (IS_DG1(i915))
193 		return DG1_DPLL_ENABLE(pll->info->id);
194 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
195 		return MG_PLL_ENABLE(0);
196 
197 	return ICL_DPLL_ENABLE(pll->info->id);
198 }
199 
200 static i915_reg_t
201 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
202 			struct intel_shared_dpll *pll)
203 {
204 	const enum intel_dpll_id id = pll->info->id;
205 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
206 
207 	if (IS_ALDERLAKE_P(i915))
208 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
209 
210 	return MG_PLL_ENABLE(tc_port);
211 }
212 
213 /**
214  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
215  * @crtc_state: CRTC, and its state, which has a shared DPLL
216  *
217  * Enable the shared DPLL used by the CRTC in @crtc_state.
218  */
219 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
220 {
221 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
222 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
223 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
224 	unsigned int pipe_mask = BIT(crtc->pipe);
225 	unsigned int old_mask;
226 
227 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
228 		return;
229 
230 	mutex_lock(&dev_priv->display.dpll.lock);
231 	old_mask = pll->active_mask;
232 
233 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
234 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
235 		goto out;
236 
237 	pll->active_mask |= pipe_mask;
238 
239 	drm_dbg_kms(&dev_priv->drm,
240 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
241 		    pll->info->name, pll->active_mask, pll->on,
242 		    crtc->base.base.id, crtc->base.name);
243 
244 	if (old_mask) {
245 		drm_WARN_ON(&dev_priv->drm, !pll->on);
246 		assert_shared_dpll_enabled(dev_priv, pll);
247 		goto out;
248 	}
249 	drm_WARN_ON(&dev_priv->drm, pll->on);
250 
251 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
252 	pll->info->funcs->enable(dev_priv, pll);
253 	pll->on = true;
254 
255 out:
256 	mutex_unlock(&dev_priv->display.dpll.lock);
257 }
258 
259 /**
260  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
261  * @crtc_state: CRTC, and its state, which has a shared DPLL
262  *
263  * Disable the shared DPLL used by the CRTC in @crtc_state.
264  */
265 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
266 {
267 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
268 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
269 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
270 	unsigned int pipe_mask = BIT(crtc->pipe);
271 
272 	/* PCH only available on ILK+ */
273 	if (DISPLAY_VER(dev_priv) < 5)
274 		return;
275 
276 	if (pll == NULL)
277 		return;
278 
279 	mutex_lock(&dev_priv->display.dpll.lock);
280 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
281 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
282 		     crtc->base.base.id, crtc->base.name))
283 		goto out;
284 
285 	drm_dbg_kms(&dev_priv->drm,
286 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
287 		    pll->info->name, pll->active_mask, pll->on,
288 		    crtc->base.base.id, crtc->base.name);
289 
290 	assert_shared_dpll_enabled(dev_priv, pll);
291 	drm_WARN_ON(&dev_priv->drm, !pll->on);
292 
293 	pll->active_mask &= ~pipe_mask;
294 	if (pll->active_mask)
295 		goto out;
296 
297 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
298 	pll->info->funcs->disable(dev_priv, pll);
299 	pll->on = false;
300 
301 out:
302 	mutex_unlock(&dev_priv->display.dpll.lock);
303 }
304 
305 static struct intel_shared_dpll *
306 intel_find_shared_dpll(struct intel_atomic_state *state,
307 		       const struct intel_crtc *crtc,
308 		       const struct intel_dpll_hw_state *pll_state,
309 		       unsigned long dpll_mask)
310 {
311 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
312 	struct intel_shared_dpll *pll, *unused_pll = NULL;
313 	struct intel_shared_dpll_state *shared_dpll;
314 	enum intel_dpll_id i;
315 
316 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
317 
318 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
319 
320 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
321 		pll = &dev_priv->display.dpll.shared_dplls[i];
322 
323 		/* Check PLLs that are already in use first; remember a free one as a fallback */
324 		if (shared_dpll[i].pipe_mask == 0) {
325 			if (!unused_pll)
326 				unused_pll = pll;
327 			continue;
328 		}
329 
330 		if (memcmp(pll_state,
331 			   &shared_dpll[i].hw_state,
332 			   sizeof(*pll_state)) == 0) {
333 			drm_dbg_kms(&dev_priv->drm,
334 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
335 				    crtc->base.base.id, crtc->base.name,
336 				    pll->info->name,
337 				    shared_dpll[i].pipe_mask,
338 				    pll->active_mask);
339 			return pll;
340 		}
341 	}
342 
343 	/* Ok no matching timings, maybe there's a free one? */
344 	if (unused_pll) {
345 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
346 			    crtc->base.base.id, crtc->base.name,
347 			    unused_pll->info->name);
348 		return unused_pll;
349 	}
350 
351 	return NULL;
352 }
353 
354 static void
355 intel_reference_shared_dpll(struct intel_atomic_state *state,
356 			    const struct intel_crtc *crtc,
357 			    const struct intel_shared_dpll *pll,
358 			    const struct intel_dpll_hw_state *pll_state)
359 {
360 	struct drm_i915_private *i915 = to_i915(state->base.dev);
361 	struct intel_shared_dpll_state *shared_dpll;
362 	const enum intel_dpll_id id = pll->info->id;
363 
364 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
365 
366 	if (shared_dpll[id].pipe_mask == 0)
367 		shared_dpll[id].hw_state = *pll_state;
368 
369 	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0);
370 
371 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
372 
373 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
374 		    crtc->base.base.id, crtc->base.name, pll->info->name);
375 }
376 
377 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
378 					  const struct intel_crtc *crtc,
379 					  const struct intel_shared_dpll *pll)
380 {
381 	struct drm_i915_private *i915 = to_i915(state->base.dev);
382 	struct intel_shared_dpll_state *shared_dpll;
383 	const enum intel_dpll_id id = pll->info->id;
384 
385 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
386 
387 	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0);
388 
389 	shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe);
390 
391 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
392 		    crtc->base.base.id, crtc->base.name, pll->info->name);
393 }
394 
395 static void intel_put_dpll(struct intel_atomic_state *state,
396 			   struct intel_crtc *crtc)
397 {
398 	const struct intel_crtc_state *old_crtc_state =
399 		intel_atomic_get_old_crtc_state(state, crtc);
400 	struct intel_crtc_state *new_crtc_state =
401 		intel_atomic_get_new_crtc_state(state, crtc);
402 
403 	new_crtc_state->shared_dpll = NULL;
404 
405 	if (!old_crtc_state->shared_dpll)
406 		return;
407 
408 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
409 }
410 
411 /**
412  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
413  * @state: atomic state
414  *
415  * This is the dpll version of drm_atomic_helper_swap_state() since the
416  * helper does not handle driver-specific global state.
417  *
418  * For consistency with atomic helpers this function does a complete swap,
419  * i.e. it also puts the current state into @state, even though there is no
420  * need for that at this moment.
421  */
422 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
423 {
424 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
425 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
426 	enum intel_dpll_id i;
427 
428 	if (!state->dpll_set)
429 		return;
430 
431 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
432 		struct intel_shared_dpll *pll =
433 			&dev_priv->display.dpll.shared_dplls[i];
434 
435 		swap(pll->state, shared_dpll[i]);
436 	}
437 }
438 
439 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
440 				      struct intel_shared_dpll *pll,
441 				      struct intel_dpll_hw_state *hw_state)
442 {
443 	const enum intel_dpll_id id = pll->info->id;
444 	intel_wakeref_t wakeref;
445 	u32 val;
446 
447 	wakeref = intel_display_power_get_if_enabled(dev_priv,
448 						     POWER_DOMAIN_DISPLAY_CORE);
449 	if (!wakeref)
450 		return false;
451 
452 	val = intel_de_read(dev_priv, PCH_DPLL(id));
453 	hw_state->dpll = val;
454 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
455 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
456 
457 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
458 
459 	return val & DPLL_VCO_ENABLE;
460 }
461 
462 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
463 {
464 	u32 val;
465 	bool enabled;
466 
467 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
468 
469 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
470 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
471 			    DREF_SUPERSPREAD_SOURCE_MASK));
472 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
473 }
474 
475 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
476 				struct intel_shared_dpll *pll)
477 {
478 	const enum intel_dpll_id id = pll->info->id;
479 
480 	/* PCH refclock must be enabled first */
481 	ibx_assert_pch_refclk_enabled(dev_priv);
482 
483 	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
484 	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
485 
486 	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
487 
488 	/* Wait for the clocks to stabilize. */
489 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
490 	udelay(150);
491 
492 	/* The pixel multiplier can only be updated once the
493 	 * DPLL is enabled and the clocks are stable.
494 	 *
495 	 * So write it again.
496 	 */
497 	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
498 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
499 	udelay(200);
500 }
501 
502 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
503 				 struct intel_shared_dpll *pll)
504 {
505 	const enum intel_dpll_id id = pll->info->id;
506 
507 	intel_de_write(dev_priv, PCH_DPLL(id), 0);
508 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
509 	udelay(200);
510 }
511 
512 static int ibx_compute_dpll(struct intel_atomic_state *state,
513 			    struct intel_crtc *crtc,
514 			    struct intel_encoder *encoder)
515 {
516 	return 0;
517 }
518 
519 static int ibx_get_dpll(struct intel_atomic_state *state,
520 			struct intel_crtc *crtc,
521 			struct intel_encoder *encoder)
522 {
523 	struct intel_crtc_state *crtc_state =
524 		intel_atomic_get_new_crtc_state(state, crtc);
525 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
526 	struct intel_shared_dpll *pll;
527 	enum intel_dpll_id i;
528 
529 	if (HAS_PCH_IBX(dev_priv)) {
530 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
531 		i = (enum intel_dpll_id) crtc->pipe;
532 		pll = &dev_priv->display.dpll.shared_dplls[i];
533 
534 		drm_dbg_kms(&dev_priv->drm,
535 			    "[CRTC:%d:%s] using pre-allocated %s\n",
536 			    crtc->base.base.id, crtc->base.name,
537 			    pll->info->name);
538 	} else {
539 		pll = intel_find_shared_dpll(state, crtc,
540 					     &crtc_state->dpll_hw_state,
541 					     BIT(DPLL_ID_PCH_PLL_B) |
542 					     BIT(DPLL_ID_PCH_PLL_A));
543 	}
544 
545 	if (!pll)
546 		return -EINVAL;
547 
548 	/* reference the pll */
549 	intel_reference_shared_dpll(state, crtc,
550 				    pll, &crtc_state->dpll_hw_state);
551 
552 	crtc_state->shared_dpll = pll;
553 
554 	return 0;
555 }
556 
557 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
558 			      const struct intel_dpll_hw_state *hw_state)
559 {
560 	drm_dbg_kms(&dev_priv->drm,
561 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
562 		    "fp0: 0x%x, fp1: 0x%x\n",
563 		    hw_state->dpll,
564 		    hw_state->dpll_md,
565 		    hw_state->fp0,
566 		    hw_state->fp1);
567 }
568 
569 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
570 	.enable = ibx_pch_dpll_enable,
571 	.disable = ibx_pch_dpll_disable,
572 	.get_hw_state = ibx_pch_dpll_get_hw_state,
573 };
574 
575 static const struct dpll_info pch_plls[] = {
576 	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
577 	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
578 	{ },
579 };
580 
581 static const struct intel_dpll_mgr pch_pll_mgr = {
582 	.dpll_info = pch_plls,
583 	.compute_dplls = ibx_compute_dpll,
584 	.get_dplls = ibx_get_dpll,
585 	.put_dplls = intel_put_dpll,
586 	.dump_hw_state = ibx_dump_hw_state,
587 };
588 
589 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
590 				 struct intel_shared_dpll *pll)
591 {
592 	const enum intel_dpll_id id = pll->info->id;
593 
594 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
595 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
596 	udelay(20);
597 }
598 
599 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
600 				struct intel_shared_dpll *pll)
601 {
602 	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
603 	intel_de_posting_read(dev_priv, SPLL_CTL);
604 	udelay(20);
605 }
606 
607 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
608 				  struct intel_shared_dpll *pll)
609 {
610 	const enum intel_dpll_id id = pll->info->id;
611 	u32 val;
612 
613 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
614 	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
615 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
616 
617 	/*
618 	 * Try to set up the PCH reference clock once all DPLLs
619 	 * that depend on it have been shut down.
620 	 */
621 	if (dev_priv->pch_ssc_use & BIT(id))
622 		intel_init_pch_refclk(dev_priv);
623 }
624 
625 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
626 				 struct intel_shared_dpll *pll)
627 {
628 	enum intel_dpll_id id = pll->info->id;
629 	u32 val;
630 
631 	val = intel_de_read(dev_priv, SPLL_CTL);
632 	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
633 	intel_de_posting_read(dev_priv, SPLL_CTL);
634 
635 	/*
636 	 * Try to set up the PCH reference clock once all DPLLs
637 	 * that depend on it have been shut down.
638 	 */
639 	if (dev_priv->pch_ssc_use & BIT(id))
640 		intel_init_pch_refclk(dev_priv);
641 }
642 
643 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
644 				       struct intel_shared_dpll *pll,
645 				       struct intel_dpll_hw_state *hw_state)
646 {
647 	const enum intel_dpll_id id = pll->info->id;
648 	intel_wakeref_t wakeref;
649 	u32 val;
650 
651 	wakeref = intel_display_power_get_if_enabled(dev_priv,
652 						     POWER_DOMAIN_DISPLAY_CORE);
653 	if (!wakeref)
654 		return false;
655 
656 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
657 	hw_state->wrpll = val;
658 
659 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
660 
661 	return val & WRPLL_PLL_ENABLE;
662 }
663 
664 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
665 				      struct intel_shared_dpll *pll,
666 				      struct intel_dpll_hw_state *hw_state)
667 {
668 	intel_wakeref_t wakeref;
669 	u32 val;
670 
671 	wakeref = intel_display_power_get_if_enabled(dev_priv,
672 						     POWER_DOMAIN_DISPLAY_CORE);
673 	if (!wakeref)
674 		return false;
675 
676 	val = intel_de_read(dev_priv, SPLL_CTL);
677 	hw_state->spll = val;
678 
679 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
680 
681 	return val & SPLL_PLL_ENABLE;
682 }
683 
684 #define LC_FREQ 2700
685 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
686 
687 #define P_MIN 2
688 #define P_MAX 64
689 #define P_INC 2
690 
691 /* Constraints for PLL good behavior */
692 #define REF_MIN 48
693 #define REF_MAX 400
694 #define VCO_MIN 2400
695 #define VCO_MAX 4800
696 
697 struct hsw_wrpll_rnp {
698 	unsigned p, n2, r2;
699 };
700 
701 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
702 {
703 	switch (clock) {
704 	case 25175000:
705 	case 25200000:
706 	case 27000000:
707 	case 27027000:
708 	case 37762500:
709 	case 37800000:
710 	case 40500000:
711 	case 40541000:
712 	case 54000000:
713 	case 54054000:
714 	case 59341000:
715 	case 59400000:
716 	case 72000000:
717 	case 74176000:
718 	case 74250000:
719 	case 81000000:
720 	case 81081000:
721 	case 89012000:
722 	case 89100000:
723 	case 108000000:
724 	case 108108000:
725 	case 111264000:
726 	case 111375000:
727 	case 148352000:
728 	case 148500000:
729 	case 162000000:
730 	case 162162000:
731 	case 222525000:
732 	case 222750000:
733 	case 296703000:
734 	case 297000000:
735 		return 0;
736 	case 233500000:
737 	case 245250000:
738 	case 247750000:
739 	case 253250000:
740 	case 298000000:
741 		return 1500;
742 	case 169128000:
743 	case 169500000:
744 	case 179500000:
745 	case 202000000:
746 		return 2000;
747 	case 256250000:
748 	case 262500000:
749 	case 270000000:
750 	case 272500000:
751 	case 273750000:
752 	case 280750000:
753 	case 281250000:
754 	case 286000000:
755 	case 291750000:
756 		return 4000;
757 	case 267250000:
758 	case 268500000:
759 		return 5000;
760 	default:
761 		return 1000;
762 	}
763 }
764 
765 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
766 				 unsigned int r2, unsigned int n2,
767 				 unsigned int p,
768 				 struct hsw_wrpll_rnp *best)
769 {
770 	u64 a, b, c, d, diff, diff_best;
771 
772 	/* No best (r,n,p) yet */
773 	if (best->p == 0) {
774 		best->p = p;
775 		best->n2 = n2;
776 		best->r2 = r2;
777 		return;
778 	}
779 
780 	/*
781 	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
782 	 * freq2k.
783 	 *
784 	 * delta = 1e6 *
785 	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
786 	 *	   freq2k;
787 	 *
788 	 * and we would like delta <= budget.
789 	 *
790 	 * If the discrepancy is above the PPM-based budget, always prefer to
791 	 * improve upon the previous solution.  However, if you're within the
792 	 * budget, try to maximize Ref * VCO, that is N / R^2.
793 	 */
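	/*
	 * Spelled out (this is just the comment above with the divisions
	 * cleared, not anything from bspec): below, a >= c means the candidate
	 * (r2, n2, p) is within the ppm budget and b >= d means the currently
	 * stored best is; the branches enumerate the four combinations without
	 * having to divide.
	 */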
794 	a = freq2k * budget * p * r2;
795 	b = freq2k * budget * best->p * best->r2;
796 	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
797 	diff_best = abs_diff(freq2k * best->p * best->r2,
798 			     LC_FREQ_2K * best->n2);
799 	c = 1000000 * diff;
800 	d = 1000000 * diff_best;
801 
802 	if (a < c && b < d) {
803 		/* If both are above the budget, pick the closer */
804 		if (best->p * best->r2 * diff < p * r2 * diff_best) {
805 			best->p = p;
806 			best->n2 = n2;
807 			best->r2 = r2;
808 		}
809 	} else if (a >= c && b < d) {
810 		/* The candidate is within the budget but the current best is not, so update. */
811 		best->p = p;
812 		best->n2 = n2;
813 		best->r2 = r2;
814 	} else if (a >= c && b >= d) {
815 		/* Both are below the limit, so pick the higher n2/(r2*r2) */
816 		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
817 			best->p = p;
818 			best->n2 = n2;
819 			best->r2 = r2;
820 		}
821 	}
822 	/* Otherwise a < c && b >= d, do nothing */
823 }
824 
825 static void
826 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
827 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
828 {
829 	u64 freq2k;
830 	unsigned p, n2, r2;
831 	struct hsw_wrpll_rnp best = {};
832 	unsigned budget;
833 
834 	freq2k = clock / 100;
835 
836 	budget = hsw_wrpll_get_budget_for_freq(clock);
837 
838 	/* Special case handling for the 540 MHz pixel clock: bypass the WR PLL
839 	 * entirely and pass the LC PLL through directly. */
840 	if (freq2k == 5400000) {
841 		*n2_out = 2;
842 		*p_out = 1;
843 		*r2_out = 2;
844 		return;
845 	}
846 
847 	/*
848 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
849 	 * the WR PLL.
850 	 *
851 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
852 	 * Injecting R2 = 2 * R gives:
853 	 *   REF_MAX * r2 > LC_FREQ * 2 and
854 	 *   REF_MIN * r2 < LC_FREQ * 2
855 	 *
856 	 * Which means the desired boundaries for r2 are:
857 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
858 	 *
859 	 */
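	/*
	 * Worked through by hand for the constants above (not from bspec):
	 * with LC_FREQ = 2700 and REF_MIN/REF_MAX = 48/400 this scans
	 * r2 = 14..112, i.e. R = 7..56, so Ref lands between roughly 48.2
	 * and 385.7 MHz, safely inside the 48-400 MHz window.
	 */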
860 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
861 	     r2 <= LC_FREQ * 2 / REF_MIN;
862 	     r2++) {
863 
864 		/*
865 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
866 		 *
867 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
868 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
869 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
870 		 *   VCO_MIN * r2 < n2 * LC_FREQ
871 		 *
872 		 * Which means the desired boundaries for n2 are:
873 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
874 		 */
875 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
876 		     n2 <= VCO_MAX * r2 / LC_FREQ;
877 		     n2++) {
878 
879 			for (p = P_MIN; p <= P_MAX; p += P_INC)
880 				hsw_wrpll_update_rnp(freq2k, budget,
881 						     r2, n2, p, &best);
882 		}
883 	}
884 
885 	*n2_out = best.n2;
886 	*p_out = best.p;
887 	*r2_out = best.r2;
888 }
889 
890 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
891 				  const struct intel_shared_dpll *pll,
892 				  const struct intel_dpll_hw_state *pll_state)
893 {
894 	int refclk;
895 	int n, p, r;
896 	u32 wrpll = pll_state->wrpll;
897 
898 	switch (wrpll & WRPLL_REF_MASK) {
899 	case WRPLL_REF_SPECIAL_HSW:
900 		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
901 		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
902 			refclk = dev_priv->display.dpll.ref_clks.nssc;
903 			break;
904 		}
905 		fallthrough;
906 	case WRPLL_REF_PCH_SSC:
907 		/*
908 		 * We could calculate spread here, but our checking
909 		 * code only cares about 5% accuracy, and spread is a max of
910 		 * 0.5% downspread.
911 		 */
912 		refclk = dev_priv->display.dpll.ref_clks.ssc;
913 		break;
914 	case WRPLL_REF_LCPLL:
915 		refclk = 2700000;
916 		break;
917 	default:
918 		MISSING_CASE(wrpll);
919 		return 0;
920 	}
921 
922 	r = wrpll & WRPLL_DIVIDER_REF_MASK;
923 	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
924 	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
925 
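	/*
	 * Note, derived from the code rather than from bspec: n and r hold the
	 * doubled values (n = 2*N, r = 2*R) computed by
	 * hsw_ddi_calculate_wrpll(), so the conversion below works out to
	 * refclk * n / (5 * p * r) kHz (modulo integer truncation).
	 */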
926 	/* Convert to KHz, p & r have a fixed point portion */
927 	return (refclk * n / 10) / (p * r) * 2;
928 }
929 
930 static int
931 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
932 			   struct intel_crtc *crtc)
933 {
934 	struct drm_i915_private *i915 = to_i915(state->base.dev);
935 	struct intel_crtc_state *crtc_state =
936 		intel_atomic_get_new_crtc_state(state, crtc);
937 	unsigned int p, n2, r2;
938 
939 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
940 
941 	crtc_state->dpll_hw_state.wrpll =
942 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
943 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
944 		WRPLL_DIVIDER_POST(p);
945 
946 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
947 							&crtc_state->dpll_hw_state);
948 
949 	return 0;
950 }
951 
952 static struct intel_shared_dpll *
953 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
954 		       struct intel_crtc *crtc)
955 {
956 	struct intel_crtc_state *crtc_state =
957 		intel_atomic_get_new_crtc_state(state, crtc);
958 
959 	return intel_find_shared_dpll(state, crtc,
960 				      &crtc_state->dpll_hw_state,
961 				      BIT(DPLL_ID_WRPLL2) |
962 				      BIT(DPLL_ID_WRPLL1));
963 }
964 
965 static int
966 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
967 {
968 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
969 	int clock = crtc_state->port_clock;
970 
971 	switch (clock / 2) {
972 	case 81000:
973 	case 135000:
974 	case 270000:
975 		return 0;
976 	default:
977 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
978 			    clock);
979 		return -EINVAL;
980 	}
981 }
982 
983 static struct intel_shared_dpll *
984 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
985 {
986 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
987 	struct intel_shared_dpll *pll;
988 	enum intel_dpll_id pll_id;
989 	int clock = crtc_state->port_clock;
990 
991 	switch (clock / 2) {
992 	case 81000:
993 		pll_id = DPLL_ID_LCPLL_810;
994 		break;
995 	case 135000:
996 		pll_id = DPLL_ID_LCPLL_1350;
997 		break;
998 	case 270000:
999 		pll_id = DPLL_ID_LCPLL_2700;
1000 		break;
1001 	default:
1002 		MISSING_CASE(clock / 2);
1003 		return NULL;
1004 	}
1005 
1006 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1007 
1008 	if (!pll)
1009 		return NULL;
1010 
1011 	return pll;
1012 }
1013 
1014 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1015 				  const struct intel_shared_dpll *pll,
1016 				  const struct intel_dpll_hw_state *pll_state)
1017 {
1018 	int link_clock = 0;
1019 
1020 	switch (pll->info->id) {
1021 	case DPLL_ID_LCPLL_810:
1022 		link_clock = 81000;
1023 		break;
1024 	case DPLL_ID_LCPLL_1350:
1025 		link_clock = 135000;
1026 		break;
1027 	case DPLL_ID_LCPLL_2700:
1028 		link_clock = 270000;
1029 		break;
1030 	default:
1031 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1032 		break;
1033 	}
1034 
1035 	return link_clock * 2;
1036 }
1037 
1038 static int
1039 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1040 			  struct intel_crtc *crtc)
1041 {
1042 	struct intel_crtc_state *crtc_state =
1043 		intel_atomic_get_new_crtc_state(state, crtc);
1044 
1045 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1046 		return -EINVAL;
1047 
1048 	crtc_state->dpll_hw_state.spll =
1049 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1050 
1051 	return 0;
1052 }
1053 
1054 static struct intel_shared_dpll *
1055 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1056 		      struct intel_crtc *crtc)
1057 {
1058 	struct intel_crtc_state *crtc_state =
1059 		intel_atomic_get_new_crtc_state(state, crtc);
1060 
1061 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1062 				      BIT(DPLL_ID_SPLL));
1063 }
1064 
1065 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1066 				 const struct intel_shared_dpll *pll,
1067 				 const struct intel_dpll_hw_state *pll_state)
1068 {
1069 	int link_clock = 0;
1070 
1071 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1072 	case SPLL_FREQ_810MHz:
1073 		link_clock = 81000;
1074 		break;
1075 	case SPLL_FREQ_1350MHz:
1076 		link_clock = 135000;
1077 		break;
1078 	case SPLL_FREQ_2700MHz:
1079 		link_clock = 270000;
1080 		break;
1081 	default:
1082 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1083 		break;
1084 	}
1085 
1086 	return link_clock * 2;
1087 }
1088 
1089 static int hsw_compute_dpll(struct intel_atomic_state *state,
1090 			    struct intel_crtc *crtc,
1091 			    struct intel_encoder *encoder)
1092 {
1093 	struct intel_crtc_state *crtc_state =
1094 		intel_atomic_get_new_crtc_state(state, crtc);
1095 
1096 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1097 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1098 	else if (intel_crtc_has_dp_encoder(crtc_state))
1099 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1100 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1101 		return hsw_ddi_spll_compute_dpll(state, crtc);
1102 	else
1103 		return -EINVAL;
1104 }
1105 
1106 static int hsw_get_dpll(struct intel_atomic_state *state,
1107 			struct intel_crtc *crtc,
1108 			struct intel_encoder *encoder)
1109 {
1110 	struct intel_crtc_state *crtc_state =
1111 		intel_atomic_get_new_crtc_state(state, crtc);
1112 	struct intel_shared_dpll *pll = NULL;
1113 
1114 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1115 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1116 	else if (intel_crtc_has_dp_encoder(crtc_state))
1117 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1118 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1119 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1120 
1121 	if (!pll)
1122 		return -EINVAL;
1123 
1124 	intel_reference_shared_dpll(state, crtc,
1125 				    pll, &crtc_state->dpll_hw_state);
1126 
1127 	crtc_state->shared_dpll = pll;
1128 
1129 	return 0;
1130 }
1131 
1132 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1133 {
1134 	i915->display.dpll.ref_clks.ssc = 135000;
1135 	/* Non-SSC is only used on non-ULT HSW. */
1136 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1137 		i915->display.dpll.ref_clks.nssc = 24000;
1138 	else
1139 		i915->display.dpll.ref_clks.nssc = 135000;
1140 }
1141 
1142 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1143 			      const struct intel_dpll_hw_state *hw_state)
1144 {
1145 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1146 		    hw_state->wrpll, hw_state->spll);
1147 }
1148 
1149 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1150 	.enable = hsw_ddi_wrpll_enable,
1151 	.disable = hsw_ddi_wrpll_disable,
1152 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
1153 	.get_freq = hsw_ddi_wrpll_get_freq,
1154 };
1155 
1156 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1157 	.enable = hsw_ddi_spll_enable,
1158 	.disable = hsw_ddi_spll_disable,
1159 	.get_hw_state = hsw_ddi_spll_get_hw_state,
1160 	.get_freq = hsw_ddi_spll_get_freq,
1161 };
1162 
1163 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1164 				 struct intel_shared_dpll *pll)
1165 {
1166 }
1167 
1168 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1169 				  struct intel_shared_dpll *pll)
1170 {
1171 }
1172 
1173 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1174 				       struct intel_shared_dpll *pll,
1175 				       struct intel_dpll_hw_state *hw_state)
1176 {
1177 	return true;
1178 }
1179 
1180 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1181 	.enable = hsw_ddi_lcpll_enable,
1182 	.disable = hsw_ddi_lcpll_disable,
1183 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
1184 	.get_freq = hsw_ddi_lcpll_get_freq,
1185 };
1186 
1187 static const struct dpll_info hsw_plls[] = {
1188 	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
1189 	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
1190 	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
1191 	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
1192 	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1193 	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1194 	{ },
1195 };
1196 
1197 static const struct intel_dpll_mgr hsw_pll_mgr = {
1198 	.dpll_info = hsw_plls,
1199 	.compute_dplls = hsw_compute_dpll,
1200 	.get_dplls = hsw_get_dpll,
1201 	.put_dplls = intel_put_dpll,
1202 	.update_ref_clks = hsw_update_dpll_ref_clks,
1203 	.dump_hw_state = hsw_dump_hw_state,
1204 };
1205 
1206 struct skl_dpll_regs {
1207 	i915_reg_t ctl, cfgcr1, cfgcr2;
1208 };
1209 
1210 /* this array is indexed by the *shared* pll id */
1211 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1212 	{
1213 		/* DPLL 0 */
1214 		.ctl = LCPLL1_CTL,
1215 		/* DPLL 0 doesn't support HDMI mode */
1216 	},
1217 	{
1218 		/* DPLL 1 */
1219 		.ctl = LCPLL2_CTL,
1220 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1221 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1222 	},
1223 	{
1224 		/* DPLL 2 */
1225 		.ctl = WRPLL_CTL(0),
1226 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1227 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1228 	},
1229 	{
1230 		/* DPLL 3 */
1231 		.ctl = WRPLL_CTL(1),
1232 		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1233 		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
1234 	},
1235 };
1236 
1237 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1238 				    struct intel_shared_dpll *pll)
1239 {
1240 	const enum intel_dpll_id id = pll->info->id;
1241 	u32 val;
1242 
1243 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1244 
1245 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1246 		 DPLL_CTRL1_SSC(id) |
1247 		 DPLL_CTRL1_LINK_RATE_MASK(id));
1248 	val |= pll->state.hw_state.ctrl1 << (id * 6);
1249 
1250 	intel_de_write(dev_priv, DPLL_CTRL1, val);
1251 	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1252 }
1253 
1254 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1255 			       struct intel_shared_dpll *pll)
1256 {
1257 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1258 	const enum intel_dpll_id id = pll->info->id;
1259 
1260 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1261 
1262 	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1263 	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1264 	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1265 	intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1266 
1267 	/* the enable bit is always bit 31 */
1268 	intel_de_write(dev_priv, regs[id].ctl,
1269 		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
1270 
1271 	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1272 		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
1273 }
1274 
1275 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1276 				 struct intel_shared_dpll *pll)
1277 {
1278 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
1279 }
1280 
1281 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1282 				struct intel_shared_dpll *pll)
1283 {
1284 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1285 	const enum intel_dpll_id id = pll->info->id;
1286 
1287 	/* the enable bit is always bit 31 */
1288 	intel_de_write(dev_priv, regs[id].ctl,
1289 		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1290 	intel_de_posting_read(dev_priv, regs[id].ctl);
1291 }
1292 
1293 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1294 				  struct intel_shared_dpll *pll)
1295 {
1296 }
1297 
1298 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1299 				     struct intel_shared_dpll *pll,
1300 				     struct intel_dpll_hw_state *hw_state)
1301 {
1302 	u32 val;
1303 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1304 	const enum intel_dpll_id id = pll->info->id;
1305 	intel_wakeref_t wakeref;
1306 	bool ret;
1307 
1308 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1309 						     POWER_DOMAIN_DISPLAY_CORE);
1310 	if (!wakeref)
1311 		return false;
1312 
1313 	ret = false;
1314 
1315 	val = intel_de_read(dev_priv, regs[id].ctl);
1316 	if (!(val & LCPLL_PLL_ENABLE))
1317 		goto out;
1318 
1319 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1320 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1321 
1322 	/* avoid reading back stale values if HDMI mode is not enabled */
1323 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1324 		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1325 		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1326 	}
1327 	ret = true;
1328 
1329 out:
1330 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1331 
1332 	return ret;
1333 }
1334 
1335 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1336 				       struct intel_shared_dpll *pll,
1337 				       struct intel_dpll_hw_state *hw_state)
1338 {
1339 	const struct skl_dpll_regs *regs = skl_dpll_regs;
1340 	const enum intel_dpll_id id = pll->info->id;
1341 	intel_wakeref_t wakeref;
1342 	u32 val;
1343 	bool ret;
1344 
1345 	wakeref = intel_display_power_get_if_enabled(dev_priv,
1346 						     POWER_DOMAIN_DISPLAY_CORE);
1347 	if (!wakeref)
1348 		return false;
1349 
1350 	ret = false;
1351 
1352 	/* DPLL0 is always enabled since it drives CDCLK */
1353 	val = intel_de_read(dev_priv, regs[id].ctl);
1354 	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1355 		goto out;
1356 
1357 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1358 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1359 
1360 	ret = true;
1361 
1362 out:
1363 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1364 
1365 	return ret;
1366 }
1367 
1368 struct skl_wrpll_context {
1369 	u64 min_deviation;		/* current minimal deviation */
1370 	u64 central_freq;		/* chosen central freq */
1371 	u64 dco_freq;			/* chosen dco freq */
1372 	unsigned int p;			/* chosen divider */
1373 };
1374 
1375 /* DCO freq must be within +1%/-6% of the DCO central freq */
1376 #define SKL_DCO_MAX_PDEVIATION	100
1377 #define SKL_DCO_MAX_NDEVIATION	600
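
/*
 * The deviation computed in skl_wrpll_try_divider() below is in units of
 * 0.01%, so these limits correspond to the +1%/-6% window mentioned above.
 */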
1378 
1379 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1380 				  u64 central_freq,
1381 				  u64 dco_freq,
1382 				  unsigned int divider)
1383 {
1384 	u64 deviation;
1385 
1386 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1387 			      central_freq);
1388 
1389 	/* positive deviation */
1390 	if (dco_freq >= central_freq) {
1391 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1392 		    deviation < ctx->min_deviation) {
1393 			ctx->min_deviation = deviation;
1394 			ctx->central_freq = central_freq;
1395 			ctx->dco_freq = dco_freq;
1396 			ctx->p = divider;
1397 		}
1398 	/* negative deviation */
1399 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1400 		   deviation < ctx->min_deviation) {
1401 		ctx->min_deviation = deviation;
1402 		ctx->central_freq = central_freq;
1403 		ctx->dco_freq = dco_freq;
1404 		ctx->p = divider;
1405 	}
1406 }
1407 
1408 static void skl_wrpll_get_multipliers(unsigned int p,
1409 				      unsigned int *p0 /* out */,
1410 				      unsigned int *p1 /* out */,
1411 				      unsigned int *p2 /* out */)
1412 {
1413 	/* even dividers */
1414 	if (p % 2 == 0) {
1415 		unsigned int half = p / 2;
1416 
1417 		if (half == 1 || half == 2 || half == 3 || half == 5) {
1418 			*p0 = 2;
1419 			*p1 = 1;
1420 			*p2 = half;
1421 		} else if (half % 2 == 0) {
1422 			*p0 = 2;
1423 			*p1 = half / 2;
1424 			*p2 = 2;
1425 		} else if (half % 3 == 0) {
1426 			*p0 = 3;
1427 			*p1 = half / 3;
1428 			*p2 = 2;
1429 		} else if (half % 7 == 0) {
1430 			*p0 = 7;
1431 			*p1 = half / 7;
1432 			*p2 = 2;
1433 		}
1434 	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
1435 		*p0 = 3;
1436 		*p1 = 1;
1437 		*p2 = p / 3;
1438 	} else if (p == 5 || p == 7) {
1439 		*p0 = p;
1440 		*p1 = 1;
1441 		*p2 = 1;
1442 	} else if (p == 15) {
1443 		*p0 = 3;
1444 		*p1 = 1;
1445 		*p2 = 5;
1446 	} else if (p == 21) {
1447 		*p0 = 7;
1448 		*p1 = 1;
1449 		*p2 = 3;
1450 	} else if (p == 35) {
1451 		*p0 = 7;
1452 		*p1 = 1;
1453 		*p2 = 5;
1454 	}
1455 }
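
/*
 * Example traced by hand through the helper above (not from bspec): p = 20 is
 * even with half = 10, which is itself even, so it splits into p0 = 2,
 * p1 = 5, p2 = 2 (2 * 5 * 2 = 20). Only values from the divider lists in
 * skl_ddi_calculate_wrpll() are ever passed in here.
 */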
1456 
1457 struct skl_wrpll_params {
1458 	u32 dco_fraction;
1459 	u32 dco_integer;
1460 	u32 qdiv_ratio;
1461 	u32 qdiv_mode;
1462 	u32 kdiv;
1463 	u32 pdiv;
1464 	u32 central_freq;
1465 };
1466 
1467 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1468 				      u64 afe_clock,
1469 				      int ref_clock,
1470 				      u64 central_freq,
1471 				      u32 p0, u32 p1, u32 p2)
1472 {
1473 	u64 dco_freq;
1474 
1475 	switch (central_freq) {
1476 	case 9600000000ULL:
1477 		params->central_freq = 0;
1478 		break;
1479 	case 9000000000ULL:
1480 		params->central_freq = 1;
1481 		break;
1482 	case 8400000000ULL:
1483 		params->central_freq = 3;
1484 	}
1485 
1486 	switch (p0) {
1487 	case 1:
1488 		params->pdiv = 0;
1489 		break;
1490 	case 2:
1491 		params->pdiv = 1;
1492 		break;
1493 	case 3:
1494 		params->pdiv = 2;
1495 		break;
1496 	case 7:
1497 		params->pdiv = 4;
1498 		break;
1499 	default:
1500 		WARN(1, "Incorrect PDiv\n");
1501 	}
1502 
1503 	switch (p2) {
1504 	case 5:
1505 		params->kdiv = 0;
1506 		break;
1507 	case 2:
1508 		params->kdiv = 1;
1509 		break;
1510 	case 3:
1511 		params->kdiv = 2;
1512 		break;
1513 	case 1:
1514 		params->kdiv = 3;
1515 		break;
1516 	default:
1517 		WARN(1, "Incorrect KDiv\n");
1518 	}
1519 
1520 	params->qdiv_ratio = p1;
1521 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1522 
1523 	dco_freq = p0 * p1 * p2 * afe_clock;
1524 
1525 	/*
1526 	 * Intermediate values are in Hz.
1527 	 * Divide by MHz to match bspec
1528 	 */
1529 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1530 	params->dco_fraction =
1531 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1532 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1533 }
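
/*
 * Unit sanity check on the helper above (hand-derived, not from bspec):
 * afe_clock and dco_freq are in Hz and ref_clock is in kHz, so dco_integer
 * ends up as the integer part of the DCO/reference ratio and dco_fraction as
 * the fractional part in 1/0x8000 steps. E.g. ref_clock = 24000 kHz and
 * dco_freq = 9000000000 Hz give dco_integer = 375 and dco_fraction = 0.
 */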
1534 
1535 static int
1536 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1537 			int ref_clock,
1538 			struct skl_wrpll_params *wrpll_params)
1539 {
1540 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1541 						 9000000000ULL,
1542 						 9600000000ULL };
1543 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1544 					    24, 28, 30, 32, 36, 40, 42, 44,
1545 					    48, 52, 54, 56, 60, 64, 66, 68,
1546 					    70, 72, 76, 78, 80, 84, 88, 90,
1547 					    92, 96, 98 };
1548 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1549 	static const struct {
1550 		const u8 *list;
1551 		int n_dividers;
1552 	} dividers[] = {
1553 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1554 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1555 	};
1556 	struct skl_wrpll_context ctx = {
1557 		.min_deviation = U64_MAX,
1558 	};
1559 	unsigned int dco, d, i;
1560 	unsigned int p0, p1, p2;
1561 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1562 
1563 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1564 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1565 			for (i = 0; i < dividers[d].n_dividers; i++) {
1566 				unsigned int p = dividers[d].list[i];
1567 				u64 dco_freq = p * afe_clock;
1568 
1569 				skl_wrpll_try_divider(&ctx,
1570 						      dco_central_freq[dco],
1571 						      dco_freq,
1572 						      p);
1573 				/*
1574 				 * Skip the remaining dividers if we're sure to
1575 				 * have found the definitive divider: we can't
1576 				 * improve on a 0 deviation.
1577 				 */
1578 				if (ctx.min_deviation == 0)
1579 					goto skip_remaining_dividers;
1580 			}
1581 		}
1582 
1583 skip_remaining_dividers:
1584 		/*
1585 		 * If a solution is found with an even divider, prefer
1586 		 * this one.
1587 		 */
1588 		if (d == 0 && ctx.p)
1589 			break;
1590 	}
1591 
1592 	if (!ctx.p)
1593 		return -EINVAL;
1594 
1595 	/*
1596 	 * gcc incorrectly analyses that these can be used without being
1597 	 * initialized. To be fair, it's hard to guess.
1598 	 */
1599 	p0 = p1 = p2 = 0;
1600 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1601 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1602 				  ctx.central_freq, p0, p1, p2);
1603 
1604 	return 0;
1605 }
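
/*
 * Worked example for the search above (hand-checked, not from bspec): a
 * 148.5 MHz port clock gives afe_clock = 742.5 MHz. Of the candidate
 * dividers only p = 12 lands inside a DCO window: 12 * 742.5 MHz = 8.91 GHz,
 * 1% below the 9 GHz central frequency (deviation 100 in 0.01% units, within
 * the -6% limit). skl_wrpll_get_multipliers() then splits p = 12 into
 * p0 = 2, p1 = 3, p2 = 2.
 */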
1606 
1607 static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
1608 				  const struct intel_shared_dpll *pll,
1609 				  const struct intel_dpll_hw_state *pll_state)
1610 {
1611 	int ref_clock = i915->display.dpll.ref_clks.nssc;
1612 	u32 p0, p1, p2, dco_freq;
1613 
1614 	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
1615 	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
1616 
1617 	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
1618 		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
1619 	else
1620 		p1 = 1;
1621 
1622 
1623 	switch (p0) {
1624 	case DPLL_CFGCR2_PDIV_1:
1625 		p0 = 1;
1626 		break;
1627 	case DPLL_CFGCR2_PDIV_2:
1628 		p0 = 2;
1629 		break;
1630 	case DPLL_CFGCR2_PDIV_3:
1631 		p0 = 3;
1632 		break;
1633 	case DPLL_CFGCR2_PDIV_7_INVALID:
1634 		/*
1635 		 * Incorrect ASUS-Z170M BIOS setting; the HW seems to ignore bit#0,
1636 		 * so handle it the same way as PDIV_7.
1637 		 */
1638 		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
1639 		fallthrough;
1640 	case DPLL_CFGCR2_PDIV_7:
1641 		p0 = 7;
1642 		break;
1643 	default:
1644 		MISSING_CASE(p0);
1645 		return 0;
1646 	}
1647 
1648 	switch (p2) {
1649 	case DPLL_CFGCR2_KDIV_5:
1650 		p2 = 5;
1651 		break;
1652 	case DPLL_CFGCR2_KDIV_2:
1653 		p2 = 2;
1654 		break;
1655 	case DPLL_CFGCR2_KDIV_3:
1656 		p2 = 3;
1657 		break;
1658 	case DPLL_CFGCR2_KDIV_1:
1659 		p2 = 1;
1660 		break;
1661 	default:
1662 		MISSING_CASE(p2);
1663 		return 0;
1664 	}
1665 
1666 	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
1667 		   ref_clock;
1668 
1669 	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
1670 		    ref_clock / 0x8000;
1671 
1672 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
1673 		return 0;
1674 
1675 	return dco_freq / (p0 * p1 * p2 * 5);
1676 }
1677 
1678 static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1679 {
1680 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1681 	struct skl_wrpll_params wrpll_params = {};
1682 	u32 ctrl1, cfgcr1, cfgcr2;
1683 	int ret;
1684 
1685 	/*
1686 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1687 	 * as the DPLL id in this function.
1688 	 */
1689 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1690 
1691 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1692 
1693 	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1694 				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
1695 	if (ret)
1696 		return ret;
1697 
1698 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1699 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1700 		wrpll_params.dco_integer;
1701 
1702 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1703 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1704 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1705 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1706 		wrpll_params.central_freq;
1707 
1708 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1709 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1710 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1711 
1712 	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
1713 							&crtc_state->dpll_hw_state);
1714 
1715 	return 0;
1716 }
1717 
1718 static int
1719 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1720 {
1721 	u32 ctrl1;
1722 
1723 	/*
1724 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1725 	 * as the DPLL id in this function.
1726 	 */
1727 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1728 	switch (crtc_state->port_clock / 2) {
1729 	case 81000:
1730 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1731 		break;
1732 	case 135000:
1733 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1734 		break;
1735 	case 270000:
1736 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1737 		break;
1738 		/* eDP 1.4 rates */
1739 	case 162000:
1740 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1741 		break;
1742 	case 108000:
1743 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1744 		break;
1745 	case 216000:
1746 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1747 		break;
1748 	}
1749 
1750 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1751 
1752 	return 0;
1753 }
1754 
1755 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1756 				  const struct intel_shared_dpll *pll,
1757 				  const struct intel_dpll_hw_state *pll_state)
1758 {
1759 	int link_clock = 0;
1760 
1761 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1762 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1763 	case DPLL_CTRL1_LINK_RATE_810:
1764 		link_clock = 81000;
1765 		break;
1766 	case DPLL_CTRL1_LINK_RATE_1080:
1767 		link_clock = 108000;
1768 		break;
1769 	case DPLL_CTRL1_LINK_RATE_1350:
1770 		link_clock = 135000;
1771 		break;
1772 	case DPLL_CTRL1_LINK_RATE_1620:
1773 		link_clock = 162000;
1774 		break;
1775 	case DPLL_CTRL1_LINK_RATE_2160:
1776 		link_clock = 216000;
1777 		break;
1778 	case DPLL_CTRL1_LINK_RATE_2700:
1779 		link_clock = 270000;
1780 		break;
1781 	default:
1782 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1783 		break;
1784 	}
1785 
1786 	return link_clock * 2;
1787 }
1788 
1789 static int skl_compute_dpll(struct intel_atomic_state *state,
1790 			    struct intel_crtc *crtc,
1791 			    struct intel_encoder *encoder)
1792 {
1793 	struct intel_crtc_state *crtc_state =
1794 		intel_atomic_get_new_crtc_state(state, crtc);
1795 
1796 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1797 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1798 	else if (intel_crtc_has_dp_encoder(crtc_state))
1799 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1800 	else
1801 		return -EINVAL;
1802 }
1803 
1804 static int skl_get_dpll(struct intel_atomic_state *state,
1805 			struct intel_crtc *crtc,
1806 			struct intel_encoder *encoder)
1807 {
1808 	struct intel_crtc_state *crtc_state =
1809 		intel_atomic_get_new_crtc_state(state, crtc);
1810 	struct intel_shared_dpll *pll;
1811 
1812 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1813 		pll = intel_find_shared_dpll(state, crtc,
1814 					     &crtc_state->dpll_hw_state,
1815 					     BIT(DPLL_ID_SKL_DPLL0));
1816 	else
1817 		pll = intel_find_shared_dpll(state, crtc,
1818 					     &crtc_state->dpll_hw_state,
1819 					     BIT(DPLL_ID_SKL_DPLL3) |
1820 					     BIT(DPLL_ID_SKL_DPLL2) |
1821 					     BIT(DPLL_ID_SKL_DPLL1));
1822 	if (!pll)
1823 		return -EINVAL;
1824 
1825 	intel_reference_shared_dpll(state, crtc,
1826 				    pll, &crtc_state->dpll_hw_state);
1827 
1828 	crtc_state->shared_dpll = pll;
1829 
1830 	return 0;
1831 }
1832 
1833 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1834 				const struct intel_shared_dpll *pll,
1835 				const struct intel_dpll_hw_state *pll_state)
1836 {
1837 	/*
1838 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1839 	 * the internal shift for each field
1840 	 */
1841 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1842 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1843 	else
1844 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1845 }
1846 
1847 static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
1848 {
1849 	/* No SSC ref */
1850 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
1851 }
1852 
1853 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1854 			      const struct intel_dpll_hw_state *hw_state)
1855 {
1856 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1857 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1858 		      hw_state->ctrl1,
1859 		      hw_state->cfgcr1,
1860 		      hw_state->cfgcr2);
1861 }
1862 
1863 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1864 	.enable = skl_ddi_pll_enable,
1865 	.disable = skl_ddi_pll_disable,
1866 	.get_hw_state = skl_ddi_pll_get_hw_state,
1867 	.get_freq = skl_ddi_pll_get_freq,
1868 };
1869 
1870 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1871 	.enable = skl_ddi_dpll0_enable,
1872 	.disable = skl_ddi_dpll0_disable,
1873 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1874 	.get_freq = skl_ddi_pll_get_freq,
1875 };
1876 
1877 static const struct dpll_info skl_plls[] = {
1878 	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1879 	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
1880 	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
1881 	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
1882 	{ },
1883 };
1884 
1885 static const struct intel_dpll_mgr skl_pll_mgr = {
1886 	.dpll_info = skl_plls,
1887 	.compute_dplls = skl_compute_dpll,
1888 	.get_dplls = skl_get_dpll,
1889 	.put_dplls = intel_put_dpll,
1890 	.update_ref_clks = skl_update_dpll_ref_clks,
1891 	.dump_hw_state = skl_dump_hw_state,
1892 };
1893 
1894 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1895 				struct intel_shared_dpll *pll)
1896 {
1897 	u32 temp;
1898 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1899 	enum dpio_phy phy;
1900 	enum dpio_channel ch;
1901 
1902 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1903 
1904 	/* Non-SSC reference */
1905 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1906 	temp |= PORT_PLL_REF_SEL;
1907 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1908 
1909 	if (IS_GEMINILAKE(dev_priv)) {
1910 		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1911 		temp |= PORT_PLL_POWER_ENABLE;
1912 		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1913 
1914 		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1915 				 PORT_PLL_POWER_STATE), 200))
1916 			drm_err(&dev_priv->drm,
1917 				"Power state not set for PLL:%d\n", port);
1918 	}
1919 
1920 	/* Disable 10 bit clock */
1921 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1922 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1923 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1924 
1925 	/* Write P1 & P2 */
1926 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1927 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1928 	temp |= pll->state.hw_state.ebb0;
1929 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1930 
1931 	/* Write M2 integer */
1932 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1933 	temp &= ~PORT_PLL_M2_INT_MASK;
1934 	temp |= pll->state.hw_state.pll0;
1935 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
1936 
1937 	/* Write N */
1938 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1939 	temp &= ~PORT_PLL_N_MASK;
1940 	temp |= pll->state.hw_state.pll1;
1941 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1942 
1943 	/* Write M2 fraction */
1944 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1945 	temp &= ~PORT_PLL_M2_FRAC_MASK;
1946 	temp |= pll->state.hw_state.pll2;
1947 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1948 
1949 	/* Write M2 fraction enable */
1950 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1951 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1952 	temp |= pll->state.hw_state.pll3;
1953 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
1954 
1955 	/* Write coeff */
1956 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1957 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
1958 	temp &= ~PORT_PLL_INT_COEFF_MASK;
1959 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
1960 	temp |= pll->state.hw_state.pll6;
1961 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1962 
1963 	/* Write calibration val */
1964 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1965 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
1966 	temp |= pll->state.hw_state.pll8;
1967 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
1968 
1969 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1970 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1971 	temp |= pll->state.hw_state.pll9;
1972 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
1973 
1974 	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1975 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1976 	temp &= ~PORT_PLL_DCO_AMP_MASK;
1977 	temp |= pll->state.hw_state.pll10;
1978 	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1979 
1980 	/* Recalibrate with new settings */
1981 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1982 	temp |= PORT_PLL_RECALIBRATE;
1983 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1984 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1985 	temp |= pll->state.hw_state.ebb4;
1986 	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1987 
1988 	/* Enable PLL */
1989 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1990 	temp |= PORT_PLL_ENABLE;
1991 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1992 	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1993 
1994 	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1995 			200))
1996 		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
1997 
1998 	if (IS_GEMINILAKE(dev_priv)) {
1999 		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
2000 		temp |= DCC_DELAY_RANGE_2;
2001 		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
2002 	}
2003 
2004 	/*
	 * While we write to the group register to program all lanes at once,
	 * we can only read back individual lane registers, so we pick lanes
	 * 0/1 for that.
2007 	 */
2008 	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
2009 	temp &= ~LANE_STAGGER_MASK;
2010 	temp &= ~LANESTAGGER_STRAP_OVRD;
2011 	temp |= pll->state.hw_state.pcsdw12;
2012 	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
2013 }
2014 
2015 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
2016 					struct intel_shared_dpll *pll)
2017 {
2018 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2019 	u32 temp;
2020 
2021 	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2022 	temp &= ~PORT_PLL_ENABLE;
2023 	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
2024 	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2025 
2026 	if (IS_GEMINILAKE(dev_priv)) {
2027 		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2028 		temp &= ~PORT_PLL_POWER_ENABLE;
2029 		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
2030 
2031 		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
2032 				  PORT_PLL_POWER_STATE), 200))
2033 			drm_err(&dev_priv->drm,
2034 				"Power state not reset for PLL:%d\n", port);
2035 	}
2036 }
2037 
2038 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2039 					struct intel_shared_dpll *pll,
2040 					struct intel_dpll_hw_state *hw_state)
2041 {
2042 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
2043 	intel_wakeref_t wakeref;
2044 	enum dpio_phy phy;
2045 	enum dpio_channel ch;
2046 	u32 val;
2047 	bool ret;
2048 
2049 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
2050 
2051 	wakeref = intel_display_power_get_if_enabled(dev_priv,
2052 						     POWER_DOMAIN_DISPLAY_CORE);
2053 	if (!wakeref)
2054 		return false;
2055 
2056 	ret = false;
2057 
2058 	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
2059 	if (!(val & PORT_PLL_ENABLE))
2060 		goto out;
2061 
2062 	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
2063 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
2064 
2065 	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
2066 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
2067 
2068 	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
2069 	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;
2070 
2071 	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
2072 	hw_state->pll1 &= PORT_PLL_N_MASK;
2073 
2074 	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
2075 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
2076 
2077 	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
2078 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
2079 
2080 	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
2081 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
2082 			  PORT_PLL_INT_COEFF_MASK |
2083 			  PORT_PLL_GAIN_CTL_MASK;
2084 
2085 	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
2086 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
2087 
2088 	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
2089 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
2090 
2091 	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
2092 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
2093 			   PORT_PLL_DCO_AMP_MASK;
2094 
2095 	/*
	 * While we write to the group register to program all lanes at once,
	 * we can only read back individual lane registers. We configure all
	 * lanes the same way, so here we just read out lanes 0/1 and output a
	 * note if lanes 2/3 differ.
2099 	 */
2100 	hw_state->pcsdw12 = intel_de_read(dev_priv,
2101 					  BXT_PORT_PCS_DW12_LN01(phy, ch));
2102 	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
2103 		drm_dbg(&dev_priv->drm,
2104 			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
2105 			hw_state->pcsdw12,
2106 			intel_de_read(dev_priv,
2107 				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
2108 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2109 
2110 	ret = true;
2111 
2112 out:
2113 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2114 
2115 	return ret;
2116 }
2117 
/* pre-calculated values for DP link rates */
2119 static const struct dpll bxt_dp_clk_val[] = {
2120 	/* m2 is .22 binary fixed point */
2121 	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2122 	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2123 	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
2124 	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2125 	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
2126 	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2127 	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
2128 };
2129 
2130 static int
2131 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2132 			  struct dpll *clk_div)
2133 {
2134 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2135 
2136 	/* Calculate HDMI div */
2137 	/*
2138 	 * FIXME: tie the following calculation into
2139 	 * i9xx_crtc_compute_clock
2140 	 */
2141 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2142 		return -EINVAL;
2143 
2144 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2145 
2146 	return 0;
2147 }
2148 
2149 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2150 				    struct dpll *clk_div)
2151 {
2152 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2153 	int i;
2154 
2155 	*clk_div = bxt_dp_clk_val[0];
2156 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2157 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2158 			*clk_div = bxt_dp_clk_val[i];
2159 			break;
2160 		}
2161 	}
2162 
2163 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2164 
2165 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2166 		    clk_div->dot != crtc_state->port_clock);
2167 }
2168 
2169 static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
2170 				     const struct dpll *clk_div)
2171 {
2172 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2173 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2174 	int clock = crtc_state->port_clock;
2175 	int vco = clk_div->vco;
2176 	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
2177 	u32 lanestagger;
2178 
2179 	if (vco >= 6200000 && vco <= 6700000) {
2180 		prop_coef = 4;
2181 		int_coef = 9;
2182 		gain_ctl = 3;
2183 		targ_cnt = 8;
2184 	} else if ((vco > 5400000 && vco < 6200000) ||
2185 			(vco >= 4800000 && vco < 5400000)) {
2186 		prop_coef = 5;
2187 		int_coef = 11;
2188 		gain_ctl = 3;
2189 		targ_cnt = 9;
2190 	} else if (vco == 5400000) {
2191 		prop_coef = 3;
2192 		int_coef = 8;
2193 		gain_ctl = 1;
2194 		targ_cnt = 9;
2195 	} else {
2196 		drm_err(&i915->drm, "Invalid VCO\n");
2197 		return -EINVAL;
2198 	}
2199 
2200 	if (clock > 270000)
2201 		lanestagger = 0x18;
2202 	else if (clock > 135000)
2203 		lanestagger = 0x0d;
2204 	else if (clock > 67000)
2205 		lanestagger = 0x07;
2206 	else if (clock > 33000)
2207 		lanestagger = 0x04;
2208 	else
2209 		lanestagger = 0x02;
2210 
2211 	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2212 	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2213 	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2214 	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
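	/*
	 * E.g. the 162000 kHz bxt_dp_clk_val entry (m2 = 0x819999a) splits
	 * into an integer part of 32 and a fraction of 0x19999a (~0.4).
	 */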
2215 
2216 	if (clk_div->m2 & 0x3fffff)
2217 		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2218 
2219 	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
2220 		PORT_PLL_INT_COEFF(int_coef) |
2221 		PORT_PLL_GAIN_CTL(gain_ctl);
2222 
2223 	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2224 
2225 	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2226 
2227 	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
2228 		PORT_PLL_DCO_AMP_OVR_EN_H;
2229 
2230 	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
2231 
2232 	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
2233 
2234 	return 0;
2235 }
2236 
2237 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2238 				const struct intel_shared_dpll *pll,
2239 				const struct intel_dpll_hw_state *pll_state)
2240 {
2241 	struct dpll clock;
2242 
2243 	clock.m1 = 2;
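	/*
	 * Reassemble the .22 fixed-point m2 from the integer and fractional
	 * register fields.
	 */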
2244 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2245 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2246 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2247 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2248 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2249 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2250 
2251 	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
2252 }
2253 
2254 static int
2255 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2256 {
2257 	struct dpll clk_div = {};
2258 
2259 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2260 
2261 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2262 }
2263 
2264 static int
2265 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2266 {
2267 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2268 	struct dpll clk_div = {};
2269 	int ret;
2270 
2271 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2272 
2273 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2274 	if (ret)
2275 		return ret;
2276 
2277 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2278 						      &crtc_state->dpll_hw_state);
2279 
2280 	return 0;
2281 }
2282 
2283 static int bxt_compute_dpll(struct intel_atomic_state *state,
2284 			    struct intel_crtc *crtc,
2285 			    struct intel_encoder *encoder)
2286 {
2287 	struct intel_crtc_state *crtc_state =
2288 		intel_atomic_get_new_crtc_state(state, crtc);
2289 
2290 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2291 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2292 	else if (intel_crtc_has_dp_encoder(crtc_state))
2293 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2294 	else
2295 		return -EINVAL;
2296 }
2297 
2298 static int bxt_get_dpll(struct intel_atomic_state *state,
2299 			struct intel_crtc *crtc,
2300 			struct intel_encoder *encoder)
2301 {
2302 	struct intel_crtc_state *crtc_state =
2303 		intel_atomic_get_new_crtc_state(state, crtc);
2304 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2305 	struct intel_shared_dpll *pll;
2306 	enum intel_dpll_id id;
2307 
2308 	/* 1:1 mapping between ports and PLLs */
2309 	id = (enum intel_dpll_id) encoder->port;
2310 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2311 
2312 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2313 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2314 
2315 	intel_reference_shared_dpll(state, crtc,
2316 				    pll, &crtc_state->dpll_hw_state);
2317 
2318 	crtc_state->shared_dpll = pll;
2319 
2320 	return 0;
2321 }
2322 
2323 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2324 {
2325 	i915->display.dpll.ref_clks.ssc = 100000;
2326 	i915->display.dpll.ref_clks.nssc = 100000;
2327 	/* DSI non-SSC ref 19.2MHz */
2328 }
2329 
2330 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2331 			      const struct intel_dpll_hw_state *hw_state)
2332 {
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
2334 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2335 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2336 		    hw_state->ebb0,
2337 		    hw_state->ebb4,
2338 		    hw_state->pll0,
2339 		    hw_state->pll1,
2340 		    hw_state->pll2,
2341 		    hw_state->pll3,
2342 		    hw_state->pll6,
2343 		    hw_state->pll8,
2344 		    hw_state->pll9,
2345 		    hw_state->pll10,
2346 		    hw_state->pcsdw12);
2347 }
2348 
2349 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2350 	.enable = bxt_ddi_pll_enable,
2351 	.disable = bxt_ddi_pll_disable,
2352 	.get_hw_state = bxt_ddi_pll_get_hw_state,
2353 	.get_freq = bxt_ddi_pll_get_freq,
2354 };
2355 
2356 static const struct dpll_info bxt_plls[] = {
2357 	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2358 	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2359 	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2360 	{ },
2361 };
2362 
2363 static const struct intel_dpll_mgr bxt_pll_mgr = {
2364 	.dpll_info = bxt_plls,
2365 	.compute_dplls = bxt_compute_dpll,
2366 	.get_dplls = bxt_get_dpll,
2367 	.put_dplls = intel_put_dpll,
2368 	.update_ref_clks = bxt_update_dpll_ref_clks,
2369 	.dump_hw_state = bxt_dump_hw_state,
2370 };
2371 
2372 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2373 				      int *qdiv, int *kdiv)
2374 {
2375 	/* even dividers */
2376 	if (bestdiv % 2 == 0) {
2377 		if (bestdiv == 2) {
2378 			*pdiv = 2;
2379 			*qdiv = 1;
2380 			*kdiv = 1;
2381 		} else if (bestdiv % 4 == 0) {
2382 			*pdiv = 2;
2383 			*qdiv = bestdiv / 4;
2384 			*kdiv = 2;
2385 		} else if (bestdiv % 6 == 0) {
2386 			*pdiv = 3;
2387 			*qdiv = bestdiv / 6;
2388 			*kdiv = 2;
2389 		} else if (bestdiv % 5 == 0) {
2390 			*pdiv = 5;
2391 			*qdiv = bestdiv / 10;
2392 			*kdiv = 2;
2393 		} else if (bestdiv % 14 == 0) {
2394 			*pdiv = 7;
2395 			*qdiv = bestdiv / 14;
2396 			*kdiv = 2;
2397 		}
2398 	} else {
2399 		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2400 			*pdiv = bestdiv;
2401 			*qdiv = 1;
2402 			*kdiv = 1;
2403 		} else { /* 9, 15, 21 */
2404 			*pdiv = bestdiv / 3;
2405 			*qdiv = 1;
2406 			*kdiv = 3;
2407 		}
2408 	}
2409 }
2410 
2411 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2412 				      u32 dco_freq, u32 ref_freq,
2413 				      int pdiv, int qdiv, int kdiv)
2414 {
2415 	u32 dco;
2416 
2417 	switch (kdiv) {
2418 	case 1:
2419 		params->kdiv = 1;
2420 		break;
2421 	case 2:
2422 		params->kdiv = 2;
2423 		break;
2424 	case 3:
2425 		params->kdiv = 4;
2426 		break;
2427 	default:
2428 		WARN(1, "Incorrect KDiv\n");
2429 	}
2430 
2431 	switch (pdiv) {
2432 	case 2:
2433 		params->pdiv = 1;
2434 		break;
2435 	case 3:
2436 		params->pdiv = 2;
2437 		break;
2438 	case 5:
2439 		params->pdiv = 4;
2440 		break;
2441 	case 7:
2442 		params->pdiv = 8;
2443 		break;
2444 	default:
2445 		WARN(1, "Incorrect PDiv\n");
2446 	}
2447 
2448 	WARN_ON(kdiv != 2 && qdiv != 1);
2449 
2450 	params->qdiv_ratio = qdiv;
2451 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2452 
2453 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2454 
2455 	params->dco_integer = dco >> 15;
2456 	params->dco_fraction = dco & 0x7fff;
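	/*
	 * E.g. an 8100000 kHz DCO with a 24000 kHz reference is 337.5, i.e.
	 * dco_integer = 0x151 and dco_fraction = 0x4000, matching the
	 * pre-computed 810000 kHz entry in icl_dp_combo_pll_24MHz_values
	 * below.
	 */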
2457 }
2458 
2459 /*
2460  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2461  * Program half of the nominal DCO divider fraction value.
2462  */
2463 static bool
2464 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2465 {
2466 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2467 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2468 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2469 		 i915->display.dpll.ref_clks.nssc == 38400;
2470 }
2471 
2472 struct icl_combo_pll_params {
2473 	int clock;
2474 	struct skl_wrpll_params wrpll;
2475 };
2476 
2477 /*
 * These values are already adjusted: they're the bits we write to the
2479  * registers, not the logical values.
2480  */
2481 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2482 	{ 540000,
2483 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2484 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2485 	{ 270000,
2486 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2487 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2488 	{ 162000,
2489 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2490 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2491 	{ 324000,
2492 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2493 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2494 	{ 216000,
2495 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2496 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2497 	{ 432000,
2498 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2499 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2500 	{ 648000,
2501 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2502 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2503 	{ 810000,
2504 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2505 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2506 };
2507 
2509 /* Also used for 38.4 MHz values. */
2510 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2511 	{ 540000,
2512 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
2513 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2514 	{ 270000,
2515 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
2516 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2517 	{ 162000,
2518 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
2519 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2520 	{ 324000,
2521 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
2522 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2523 	{ 216000,
2524 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2525 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2526 	{ 432000,
2527 	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2528 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2529 	{ 648000,
2530 	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
2531 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2532 	{ 810000,
2533 	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
2534 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2535 };
2536 
2537 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2538 	.dco_integer = 0x151, .dco_fraction = 0x4000,
2539 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2540 };
2541 
2542 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2543 	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
2544 	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2545 };
2546 
2547 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2548 	.dco_integer = 0x54, .dco_fraction = 0x3000,
2549 	/* the following params are unused */
2550 	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2551 };
2552 
2553 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2554 	.dco_integer = 0x43, .dco_fraction = 0x4000,
2555 	/* the following params are unused */
2556 };
2557 
2558 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2559 				 struct skl_wrpll_params *pll_params)
2560 {
2561 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2562 	const struct icl_combo_pll_params *params =
2563 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2564 		icl_dp_combo_pll_24MHz_values :
2565 		icl_dp_combo_pll_19_2MHz_values;
2566 	int clock = crtc_state->port_clock;
2567 	int i;
2568 
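	/* both reference clock tables above have the same number of entries */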
2569 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2570 		if (clock == params[i].clock) {
2571 			*pll_params = params[i].wrpll;
2572 			return 0;
2573 		}
2574 	}
2575 
2576 	MISSING_CASE(clock);
2577 	return -EINVAL;
2578 }
2579 
2580 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2581 			    struct skl_wrpll_params *pll_params)
2582 {
2583 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2584 
2585 	if (DISPLAY_VER(dev_priv) >= 12) {
2586 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2587 		default:
2588 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2589 			fallthrough;
2590 		case 19200:
2591 		case 38400:
2592 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2593 			break;
2594 		case 24000:
2595 			*pll_params = tgl_tbt_pll_24MHz_values;
2596 			break;
2597 		}
2598 	} else {
2599 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2600 		default:
2601 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2602 			fallthrough;
2603 		case 19200:
2604 		case 38400:
2605 			*pll_params = icl_tbt_pll_19_2MHz_values;
2606 			break;
2607 		case 24000:
2608 			*pll_params = icl_tbt_pll_24MHz_values;
2609 			break;
2610 		}
2611 	}
2612 
2613 	return 0;
2614 }
2615 
2616 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
2617 				    const struct intel_shared_dpll *pll,
2618 				    const struct intel_dpll_hw_state *pll_state)
2619 {
2620 	/*
	 * The PLL outputs multiple frequencies at the same time; the selection
	 * is made at the DDI clock mux level.
2623 	 */
2624 	drm_WARN_ON(&i915->drm, 1);
2625 
2626 	return 0;
2627 }
2628 
2629 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2630 {
2631 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2632 
2633 	/*
	 * For ICL+, the spec states: if the reference frequency is 38.4 MHz,
	 * use 19.2 MHz, because the DPLL automatically divides that by 2.
2636 	 */
2637 	if (ref_clock == 38400)
2638 		ref_clock = 19200;
2639 
2640 	return ref_clock;
2641 }
2642 
2643 static int
2644 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2645 	       struct skl_wrpll_params *wrpll_params)
2646 {
2647 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2648 	int ref_clock = icl_wrpll_ref_clock(i915);
2649 	u32 afe_clock = crtc_state->port_clock * 5;
2650 	u32 dco_min = 7998000;
2651 	u32 dco_max = 10000000;
2652 	u32 dco_mid = (dco_min + dco_max) / 2;
2653 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2654 					 18, 20, 24, 28, 30, 32,  36,  40,
2655 					 42, 44, 48, 50, 52, 54,  56,  60,
2656 					 64, 66, 68, 70, 72, 76,  78,  80,
2657 					 84, 88, 90, 92, 96, 98, 100, 102,
2658 					  3,  5,  7,  9, 15, 21 };
2659 	u32 dco, best_dco = 0, dco_centrality = 0;
2660 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2661 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2662 
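	/*
	 * Pick the divider whose DCO frequency lands closest to the middle of
	 * the allowed range. Worked example (for illustration): a 594000 kHz
	 * HDMI port clock gives afe_clock = 2970000 kHz; the only divider
	 * putting the DCO in range is 3, so best_dco = 8910000 kHz and
	 * pdiv/qdiv/kdiv = 3/1/1.
	 */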
2663 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2664 		dco = afe_clock * dividers[d];
2665 
2666 		if (dco <= dco_max && dco >= dco_min) {
2667 			dco_centrality = abs(dco - dco_mid);
2668 
2669 			if (dco_centrality < best_dco_centrality) {
2670 				best_dco_centrality = dco_centrality;
2671 				best_div = dividers[d];
2672 				best_dco = dco;
2673 			}
2674 		}
2675 	}
2676 
2677 	if (best_div == 0)
2678 		return -EINVAL;
2679 
2680 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2681 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2682 				  pdiv, qdiv, kdiv);
2683 
2684 	return 0;
2685 }
2686 
2687 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
2688 				      const struct intel_shared_dpll *pll,
2689 				      const struct intel_dpll_hw_state *pll_state)
2690 {
2691 	int ref_clock = icl_wrpll_ref_clock(i915);
2692 	u32 dco_fraction;
2693 	u32 p0, p1, p2, dco_freq;
2694 
2695 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2696 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2697 
2698 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2699 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2700 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2701 	else
2702 		p1 = 1;
2703 
2704 	switch (p0) {
2705 	case DPLL_CFGCR1_PDIV_2:
2706 		p0 = 2;
2707 		break;
2708 	case DPLL_CFGCR1_PDIV_3:
2709 		p0 = 3;
2710 		break;
2711 	case DPLL_CFGCR1_PDIV_5:
2712 		p0 = 5;
2713 		break;
2714 	case DPLL_CFGCR1_PDIV_7:
2715 		p0 = 7;
2716 		break;
2717 	}
2718 
2719 	switch (p2) {
2720 	case DPLL_CFGCR1_KDIV_1:
2721 		p2 = 1;
2722 		break;
2723 	case DPLL_CFGCR1_KDIV_2:
2724 		p2 = 2;
2725 		break;
2726 	case DPLL_CFGCR1_KDIV_3:
2727 		p2 = 3;
2728 		break;
2729 	}
2730 
2731 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2732 		   ref_clock;
2733 
2734 	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2735 		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2736 
2737 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2738 		dco_fraction *= 2;
2739 
2740 	dco_freq += (dco_fraction * ref_clock) / 0x8000;
2741 
2742 	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
2743 		return 0;
2744 
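	/* The AFE clock is 5x the port clock, hence the final division by 5. */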
2745 	return dco_freq / (p0 * p1 * p2 * 5);
2746 }
2747 
2748 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2749 				const struct skl_wrpll_params *pll_params,
2750 				struct intel_dpll_hw_state *pll_state)
2751 {
2752 	u32 dco_fraction = pll_params->dco_fraction;
2753 
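	/*
	 * The WA halves the DCO fraction programmed here;
	 * icl_ddi_combo_pll_get_freq() doubles it back on readout.
	 */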
2754 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2755 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2756 
2757 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2758 			    pll_params->dco_integer;
2759 
2760 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2761 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2762 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2763 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2764 
2765 	if (DISPLAY_VER(i915) >= 12)
2766 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2767 	else
2768 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2769 
2770 	if (i915->display.vbt.override_afc_startup)
2771 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
2772 }
2773 
2774 static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2775 				    u32 *target_dco_khz,
2776 				    struct intel_dpll_hw_state *state,
2777 				    bool is_dkl)
2778 {
2779 	static const u8 div1_vals[] = { 7, 5, 3, 2 };
2780 	u32 dco_min_freq, dco_max_freq;
2781 	unsigned int i;
2782 	int div2;
2783 
2784 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2785 	dco_max_freq = is_dp ? 8100000 : 10000000;
2786 
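	/*
	 * Worked example (for illustration): a 270000 kHz DP link gives an
	 * AFE clock of 1350000 kHz; since the DP DCO is fixed at 8100000 kHz,
	 * the search settles on div1 = 3, div2 = 2.
	 */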
2787 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2788 		int div1 = div1_vals[i];
2789 
2790 		for (div2 = 10; div2 > 0; div2--) {
2791 			int dco = div1 * div2 * clock_khz * 5;
2792 			int a_divratio, tlinedrv, inputsel;
2793 			u32 hsdiv;
2794 
2795 			if (dco < dco_min_freq || dco > dco_max_freq)
2796 				continue;
2797 
2798 			if (div2 >= 2) {
2799 				/*
				 * Note: a_divratio doesn't match the TGL BSpec
				 * algorithm, but it matches the hardcoded
				 * values and is known to work on HW, at least
				 * for DP alt-mode.
2803 				 */
2804 				a_divratio = is_dp ? 10 : 5;
2805 				tlinedrv = is_dkl ? 1 : 2;
2806 			} else {
2807 				a_divratio = 5;
2808 				tlinedrv = 0;
2809 			}
2810 			inputsel = is_dp ? 0 : 1;
2811 
2812 			switch (div1) {
2813 			default:
2814 				MISSING_CASE(div1);
2815 				fallthrough;
2816 			case 2:
2817 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2818 				break;
2819 			case 3:
2820 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2821 				break;
2822 			case 5:
2823 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2824 				break;
2825 			case 7:
2826 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2827 				break;
2828 			}
2829 
2830 			*target_dco_khz = dco;
2831 
2832 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2833 
2834 			state->mg_clktop2_coreclkctl1 =
2835 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2836 
2837 			state->mg_clktop2_hsclkctl =
2838 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2839 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2840 				hsdiv |
2841 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2842 
2843 			return 0;
2844 		}
2845 	}
2846 
2847 	return -EINVAL;
2848 }
2849 
2850 /*
2851  * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation; that's why it looks so different.
2853  */
2854 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2855 				 struct intel_dpll_hw_state *pll_state)
2856 {
2857 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2858 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2859 	int clock = crtc_state->port_clock;
2860 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2861 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2862 	u32 prop_coeff, int_coeff;
2863 	u32 tdc_targetcnt, feedfwgain;
2864 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2865 	u64 tmp;
2866 	bool use_ssc = false;
2867 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2868 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2869 	int ret;
2870 
2871 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2872 				       pll_state, is_dkl);
2873 	if (ret)
2874 		return ret;
2875 
2876 	m1div = 2;
2877 	m2div_int = dco_khz / (refclk_khz * m1div);
2878 	if (m2div_int > 255) {
2879 		if (!is_dkl) {
2880 			m1div = 4;
2881 			m2div_int = dco_khz / (refclk_khz * m1div);
2882 		}
2883 
2884 		if (m2div_int > 255)
2885 			return -EINVAL;
2886 	}
2887 	m2div_rem = dco_khz % (refclk_khz * m1div);
2888 
2889 	tmp = (u64)m2div_rem * (1 << 22);
2890 	do_div(tmp, refclk_khz * m1div);
2891 	m2div_frac = tmp;
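	/*
	 * Worked example (for illustration): dco_khz = 8100000 with a
	 * 19200 kHz refclk and m1div = 2 gives m2div_int = 210,
	 * m2div_rem = 36000 and m2div_frac = 0x3c0000 (0.9375 in .22 fixed
	 * point).
	 */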
2892 
2893 	switch (refclk_khz) {
2894 	case 19200:
2895 		iref_ndiv = 1;
2896 		iref_trim = 28;
2897 		iref_pulse_w = 1;
2898 		break;
2899 	case 24000:
2900 		iref_ndiv = 1;
2901 		iref_trim = 25;
2902 		iref_pulse_w = 2;
2903 		break;
2904 	case 38400:
2905 		iref_ndiv = 2;
2906 		iref_trim = 28;
2907 		iref_pulse_w = 1;
2908 		break;
2909 	default:
2910 		MISSING_CASE(refclk_khz);
2911 		return -EINVAL;
2912 	}
2913 
2914 	/*
2915 	 * tdc_res = 0.000003
2916 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2917 	 *
	 * The multiplication by 1000 is due to the refclk MHz to kHz
	 * conversion. It was supposed to be a division, but we rearranged the
	 * operations of the formula to avoid early divisions so that the
	 * rounding errors don't get multiplied.
2922 	 *
2923 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2924 	 * we also rearrange to work with integers.
2925 	 *
2926 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2927 	 * last division by 10.
2928 	 */
2929 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2930 
2931 	/*
2932 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2933 	 * 32 bits. That's not a problem since we round the division down
2934 	 * anyway.
2935 	 */
2936 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2937 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2938 
2939 	if (dco_khz >= 9000000) {
2940 		prop_coeff = 5;
2941 		int_coeff = 10;
2942 	} else {
2943 		prop_coeff = 4;
2944 		int_coeff = 8;
2945 	}
2946 
2947 	if (use_ssc) {
2948 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2949 		do_div(tmp, refclk_khz * m1div * 10000);
2950 		ssc_stepsize = tmp;
2951 
2952 		tmp = mul_u32_u32(dco_khz, 1000);
2953 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2954 	} else {
2955 		ssc_stepsize = 0;
2956 		ssc_steplen = 0;
2957 	}
2958 	ssc_steplog = 4;
2959 
2960 	/* write pll_state calculations */
2961 	if (is_dkl) {
2962 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2963 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2964 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2965 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2966 		if (dev_priv->display.vbt.override_afc_startup) {
2967 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2968 
2969 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2970 		}
2971 
2972 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2973 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2974 
2975 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2976 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2977 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2978 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2979 
2980 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2981 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2982 
2983 		pll_state->mg_pll_tdc_coldst_bias =
2984 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2985 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2986 
2987 	} else {
2988 		pll_state->mg_pll_div0 =
2989 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2990 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2991 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2992 
2993 		pll_state->mg_pll_div1 =
2994 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2995 			MG_PLL_DIV1_DITHER_DIV_2 |
2996 			MG_PLL_DIV1_NDIVRATIO(1) |
2997 			MG_PLL_DIV1_FBPREDIV(m1div);
2998 
2999 		pll_state->mg_pll_lf =
3000 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3001 			MG_PLL_LF_AFCCNTSEL_512 |
3002 			MG_PLL_LF_GAINCTRL(1) |
3003 			MG_PLL_LF_INT_COEFF(int_coeff) |
3004 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3005 
3006 		pll_state->mg_pll_frac_lock =
3007 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3008 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3009 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3010 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3011 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3012 		if (use_ssc || m2div_rem > 0)
3013 			pll_state->mg_pll_frac_lock |=
3014 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3015 
3016 		pll_state->mg_pll_ssc =
3017 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3018 			MG_PLL_SSC_TYPE(2) |
3019 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3020 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3021 			MG_PLL_SSC_FLLEN |
3022 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3023 
3024 		pll_state->mg_pll_tdc_coldst_bias =
3025 			MG_PLL_TDC_COLDST_COLDSTART |
3026 			MG_PLL_TDC_COLDST_IREFINT_EN |
3027 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3028 			MG_PLL_TDC_TDCOVCCORR_EN |
3029 			MG_PLL_TDC_TDCSEL(3);
3030 
3031 		pll_state->mg_pll_bias =
3032 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3033 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3034 			MG_PLL_BIAS_BIAS_BONUS(10) |
3035 			MG_PLL_BIAS_BIASCAL_EN |
3036 			MG_PLL_BIAS_CTRIM(12) |
3037 			MG_PLL_BIAS_VREF_RDAC(4) |
3038 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3039 
3040 		if (refclk_khz == 38400) {
3041 			pll_state->mg_pll_tdc_coldst_bias_mask =
3042 				MG_PLL_TDC_COLDST_COLDSTART;
3043 			pll_state->mg_pll_bias_mask = 0;
3044 		} else {
3045 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3046 			pll_state->mg_pll_bias_mask = -1U;
3047 		}
3048 
3049 		pll_state->mg_pll_tdc_coldst_bias &=
3050 			pll_state->mg_pll_tdc_coldst_bias_mask;
3051 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3052 	}
3053 
3054 	return 0;
3055 }
3056 
3057 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3058 				   const struct intel_shared_dpll *pll,
3059 				   const struct intel_dpll_hw_state *pll_state)
3060 {
3061 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3062 	u64 tmp;
3063 
3064 	ref_clock = dev_priv->display.dpll.ref_clks.nssc;
3065 
3066 	if (DISPLAY_VER(dev_priv) >= 12) {
3067 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3068 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3069 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3070 
3071 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3072 			m2_frac = pll_state->mg_pll_bias &
3073 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3074 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3075 		} else {
3076 			m2_frac = 0;
3077 		}
3078 	} else {
3079 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3080 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3081 
3082 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3083 			m2_frac = pll_state->mg_pll_div0 &
3084 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3085 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3086 		} else {
3087 			m2_frac = 0;
3088 		}
3089 	}
3090 
3091 	switch (pll_state->mg_clktop2_hsclkctl &
3092 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3093 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3094 		div1 = 2;
3095 		break;
3096 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3097 		div1 = 3;
3098 		break;
3099 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3100 		div1 = 5;
3101 		break;
3102 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3103 		div1 = 7;
3104 		break;
3105 	default:
3106 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3107 		return 0;
3108 	}
3109 
3110 	div2 = (pll_state->mg_clktop2_hsclkctl &
3111 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3112 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3113 
	/* a div2 value of 0 is the same as 1, i.e. no division */
3115 	if (div2 == 0)
3116 		div2 = 1;
3117 
3118 	/*
3119 	 * Adjust the original formula to delay the division by 2^22 in order to
3120 	 * minimize possible rounding errors.
3121 	 */
3122 	tmp = (u64)m1 * m2_int * ref_clock +
3123 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3124 	tmp = div_u64(tmp, 5 * div1 * div2);
3125 
3126 	return tmp;
3127 }
3128 
3129 /**
3130  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3131  * @crtc_state: state for the CRTC to select the DPLL for
3132  * @port_dpll_id: the active @port_dpll_id to select
3133  *
3134  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3135  * CRTC.
3136  */
3137 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3138 			      enum icl_port_dpll_id port_dpll_id)
3139 {
3140 	struct icl_port_dpll *port_dpll =
3141 		&crtc_state->icl_port_dplls[port_dpll_id];
3142 
3143 	crtc_state->shared_dpll = port_dpll->pll;
3144 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3145 }
3146 
3147 static void icl_update_active_dpll(struct intel_atomic_state *state,
3148 				   struct intel_crtc *crtc,
3149 				   struct intel_encoder *encoder)
3150 {
3151 	struct intel_crtc_state *crtc_state =
3152 		intel_atomic_get_new_crtc_state(state, crtc);
3153 	struct intel_digital_port *primary_port;
3154 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3155 
3156 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3157 		enc_to_mst(encoder)->primary :
3158 		enc_to_dig_port(encoder);
3159 
3160 	if (primary_port &&
3161 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3162 	     intel_tc_port_in_legacy_mode(primary_port)))
3163 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3164 
3165 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3166 }
3167 
3168 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3169 				      struct intel_crtc *crtc)
3170 {
3171 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3172 	struct intel_crtc_state *crtc_state =
3173 		intel_atomic_get_new_crtc_state(state, crtc);
3174 	struct icl_port_dpll *port_dpll =
3175 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3176 	struct skl_wrpll_params pll_params = {};
3177 	int ret;
3178 
3179 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3180 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3181 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3182 	else
3183 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3184 
3185 	if (ret)
3186 		return ret;
3187 
3188 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3189 
3190 	/* this is mainly for the fastset check */
3191 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
3192 
3193 	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
3194 							    &port_dpll->hw_state);
3195 
3196 	return 0;
3197 }
3198 
3199 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3200 				  struct intel_crtc *crtc,
3201 				  struct intel_encoder *encoder)
3202 {
3203 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3204 	struct intel_crtc_state *crtc_state =
3205 		intel_atomic_get_new_crtc_state(state, crtc);
3206 	struct icl_port_dpll *port_dpll =
3207 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3208 	enum port port = encoder->port;
3209 	unsigned long dpll_mask;
3210 
3211 	if (IS_ALDERLAKE_S(dev_priv)) {
3212 		dpll_mask =
3213 			BIT(DPLL_ID_DG1_DPLL3) |
3214 			BIT(DPLL_ID_DG1_DPLL2) |
3215 			BIT(DPLL_ID_ICL_DPLL1) |
3216 			BIT(DPLL_ID_ICL_DPLL0);
3217 	} else if (IS_DG1(dev_priv)) {
3218 		if (port == PORT_D || port == PORT_E) {
3219 			dpll_mask =
3220 				BIT(DPLL_ID_DG1_DPLL2) |
3221 				BIT(DPLL_ID_DG1_DPLL3);
3222 		} else {
3223 			dpll_mask =
3224 				BIT(DPLL_ID_DG1_DPLL0) |
3225 				BIT(DPLL_ID_DG1_DPLL1);
3226 		}
3227 	} else if (IS_ROCKETLAKE(dev_priv)) {
3228 		dpll_mask =
3229 			BIT(DPLL_ID_EHL_DPLL4) |
3230 			BIT(DPLL_ID_ICL_DPLL1) |
3231 			BIT(DPLL_ID_ICL_DPLL0);
3232 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3233 		dpll_mask =
3234 			BIT(DPLL_ID_EHL_DPLL4) |
3235 			BIT(DPLL_ID_ICL_DPLL1) |
3236 			BIT(DPLL_ID_ICL_DPLL0);
3237 	} else {
3238 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3239 	}
3240 
3241 	/* Eliminate DPLLs from consideration if reserved by HTI */
3242 	dpll_mask &= ~intel_hti_dpll_mask(dev_priv);
3243 
3244 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3245 						&port_dpll->hw_state,
3246 						dpll_mask);
3247 	if (!port_dpll->pll)
3248 		return -EINVAL;
3249 
3250 	intel_reference_shared_dpll(state, crtc,
3251 				    port_dpll->pll, &port_dpll->hw_state);
3252 
3253 	icl_update_active_dpll(state, crtc, encoder);
3254 
3255 	return 0;
3256 }
3257 
3258 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3259 				    struct intel_crtc *crtc)
3260 {
3261 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3262 	struct intel_crtc_state *crtc_state =
3263 		intel_atomic_get_new_crtc_state(state, crtc);
3264 	struct icl_port_dpll *port_dpll =
3265 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3266 	struct skl_wrpll_params pll_params = {};
3267 	int ret;
3268 
3269 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3270 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3271 	if (ret)
3272 		return ret;
3273 
3274 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3275 
3276 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3277 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3278 	if (ret)
3279 		return ret;
3280 
3281 	/* this is mainly for the fastset check */
3282 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3283 
3284 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3285 							 &port_dpll->hw_state);
3286 
3287 	return 0;
3288 }
3289 
3290 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3291 				struct intel_crtc *crtc,
3292 				struct intel_encoder *encoder)
3293 {
3294 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3295 	struct intel_crtc_state *crtc_state =
3296 		intel_atomic_get_new_crtc_state(state, crtc);
3297 	struct icl_port_dpll *port_dpll =
3298 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3299 	enum intel_dpll_id dpll_id;
3300 	int ret;
3301 
3302 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3303 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3304 						&port_dpll->hw_state,
3305 						BIT(DPLL_ID_ICL_TBTPLL));
3306 	if (!port_dpll->pll)
3307 		return -EINVAL;
3308 	intel_reference_shared_dpll(state, crtc,
3309 				    port_dpll->pll, &port_dpll->hw_state);
3310 
3312 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3313 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3314 							 encoder->port));
3315 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3316 						&port_dpll->hw_state,
3317 						BIT(dpll_id));
3318 	if (!port_dpll->pll) {
3319 		ret = -EINVAL;
3320 		goto err_unreference_tbt_pll;
3321 	}
3322 	intel_reference_shared_dpll(state, crtc,
3323 				    port_dpll->pll, &port_dpll->hw_state);
3324 
3325 	icl_update_active_dpll(state, crtc, encoder);
3326 
3327 	return 0;
3328 
3329 err_unreference_tbt_pll:
3330 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3331 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3332 
3333 	return ret;
3334 }
3335 
3336 static int icl_compute_dplls(struct intel_atomic_state *state,
3337 			     struct intel_crtc *crtc,
3338 			     struct intel_encoder *encoder)
3339 {
3340 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3341 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3342 
3343 	if (intel_phy_is_combo(dev_priv, phy))
3344 		return icl_compute_combo_phy_dpll(state, crtc);
3345 	else if (intel_phy_is_tc(dev_priv, phy))
3346 		return icl_compute_tc_phy_dplls(state, crtc);
3347 
3348 	MISSING_CASE(phy);
3349 
3350 	return 0;
3351 }
3352 
3353 static int icl_get_dplls(struct intel_atomic_state *state,
3354 			 struct intel_crtc *crtc,
3355 			 struct intel_encoder *encoder)
3356 {
3357 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3358 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3359 
3360 	if (intel_phy_is_combo(dev_priv, phy))
3361 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3362 	else if (intel_phy_is_tc(dev_priv, phy))
3363 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3364 
3365 	MISSING_CASE(phy);
3366 
3367 	return -EINVAL;
3368 }
3369 
3370 static void icl_put_dplls(struct intel_atomic_state *state,
3371 			  struct intel_crtc *crtc)
3372 {
3373 	const struct intel_crtc_state *old_crtc_state =
3374 		intel_atomic_get_old_crtc_state(state, crtc);
3375 	struct intel_crtc_state *new_crtc_state =
3376 		intel_atomic_get_new_crtc_state(state, crtc);
3377 	enum icl_port_dpll_id id;
3378 
3379 	new_crtc_state->shared_dpll = NULL;
3380 
3381 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3382 		const struct icl_port_dpll *old_port_dpll =
3383 			&old_crtc_state->icl_port_dplls[id];
3384 		struct icl_port_dpll *new_port_dpll =
3385 			&new_crtc_state->icl_port_dplls[id];
3386 
3387 		new_port_dpll->pll = NULL;
3388 
3389 		if (!old_port_dpll->pll)
3390 			continue;
3391 
3392 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3393 	}
3394 }
3395 
3396 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3397 				struct intel_shared_dpll *pll,
3398 				struct intel_dpll_hw_state *hw_state)
3399 {
3400 	const enum intel_dpll_id id = pll->info->id;
3401 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3402 	intel_wakeref_t wakeref;
3403 	bool ret = false;
3404 	u32 val;
3405 
3406 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3407 
3408 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3409 						     POWER_DOMAIN_DISPLAY_CORE);
3410 	if (!wakeref)
3411 		return false;
3412 
3413 	val = intel_de_read(dev_priv, enable_reg);
3414 	if (!(val & PLL_ENABLE))
3415 		goto out;
3416 
3417 	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3418 						  MG_REFCLKIN_CTL(tc_port));
3419 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3420 
3421 	hw_state->mg_clktop2_coreclkctl1 =
3422 		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3423 	hw_state->mg_clktop2_coreclkctl1 &=
3424 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3425 
3426 	hw_state->mg_clktop2_hsclkctl =
3427 		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3428 	hw_state->mg_clktop2_hsclkctl &=
3429 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3430 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3431 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3432 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3433 
3434 	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3435 	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3436 	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3437 	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3438 						   MG_PLL_FRAC_LOCK(tc_port));
3439 	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3440 
3441 	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3442 	hw_state->mg_pll_tdc_coldst_bias =
3443 		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3444 
3445 	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
3446 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3447 		hw_state->mg_pll_bias_mask = 0;
3448 	} else {
3449 		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3450 		hw_state->mg_pll_bias_mask = -1U;
3451 	}
3452 
3453 	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3454 	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3455 
3456 	ret = true;
3457 out:
3458 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3459 	return ret;
3460 }
3461 
3462 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3463 				 struct intel_shared_dpll *pll,
3464 				 struct intel_dpll_hw_state *hw_state)
3465 {
3466 	const enum intel_dpll_id id = pll->info->id;
3467 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3468 	intel_wakeref_t wakeref;
3469 	bool ret = false;
3470 	u32 val;
3471 
3472 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3473 						     POWER_DOMAIN_DISPLAY_CORE);
3474 	if (!wakeref)
3475 		return false;
3476 
3477 	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
3478 	if (!(val & PLL_ENABLE))
3479 		goto out;
3480 
3481 	/*
3482 	 * All registers read here have the same HIP_INDEX_REG even though
3483 	 * they are on different building blocks
3484 	 */
3485 	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
3486 						       DKL_REFCLKIN_CTL(tc_port));
3487 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3488 
3489 	hw_state->mg_clktop2_hsclkctl =
3490 		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3491 	hw_state->mg_clktop2_hsclkctl &=
3492 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3493 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3494 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3495 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3496 
3497 	hw_state->mg_clktop2_coreclkctl1 =
3498 		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3499 	hw_state->mg_clktop2_coreclkctl1 &=
3500 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3501 
3502 	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
3503 	val = DKL_PLL_DIV0_MASK;
3504 	if (dev_priv->display.vbt.override_afc_startup)
3505 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3506 	hw_state->mg_pll_div0 &= val;
3507 
3508 	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3509 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3510 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3511 
3512 	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3513 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3514 				 DKL_PLL_SSC_STEP_LEN_MASK |
3515 				 DKL_PLL_SSC_STEP_NUM_MASK |
3516 				 DKL_PLL_SSC_EN);
3517 
3518 	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3519 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3520 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3521 
3522 	hw_state->mg_pll_tdc_coldst_bias =
3523 		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3524 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3525 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3526 
3527 	ret = true;
3528 out:
3529 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3530 	return ret;
3531 }
3532 
3533 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3534 				 struct intel_shared_dpll *pll,
3535 				 struct intel_dpll_hw_state *hw_state,
3536 				 i915_reg_t enable_reg)
3537 {
3538 	const enum intel_dpll_id id = pll->info->id;
3539 	intel_wakeref_t wakeref;
3540 	bool ret = false;
3541 	u32 val;
3542 
3543 	wakeref = intel_display_power_get_if_enabled(dev_priv,
3544 						     POWER_DOMAIN_DISPLAY_CORE);
3545 	if (!wakeref)
3546 		return false;
3547 
3548 	val = intel_de_read(dev_priv, enable_reg);
3549 	if (!(val & PLL_ENABLE))
3550 		goto out;
3551 
3552 	if (IS_ALDERLAKE_S(dev_priv)) {
3553 		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
3554 		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
3555 	} else if (IS_DG1(dev_priv)) {
3556 		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
3557 		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
3558 	} else if (IS_ROCKETLAKE(dev_priv)) {
3559 		hw_state->cfgcr0 = intel_de_read(dev_priv,
3560 						 RKL_DPLL_CFGCR0(id));
3561 		hw_state->cfgcr1 = intel_de_read(dev_priv,
3562 						 RKL_DPLL_CFGCR1(id));
3563 	} else if (DISPLAY_VER(dev_priv) >= 12) {
3564 		hw_state->cfgcr0 = intel_de_read(dev_priv,
3565 						 TGL_DPLL_CFGCR0(id));
3566 		hw_state->cfgcr1 = intel_de_read(dev_priv,
3567 						 TGL_DPLL_CFGCR1(id));
3568 		if (dev_priv->display.vbt.override_afc_startup) {
3569 			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
3570 			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
3571 		}
3572 	} else {
3573 		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3574 			hw_state->cfgcr0 = intel_de_read(dev_priv,
3575 							 ICL_DPLL_CFGCR0(4));
3576 			hw_state->cfgcr1 = intel_de_read(dev_priv,
3577 							 ICL_DPLL_CFGCR1(4));
3578 		} else {
3579 			hw_state->cfgcr0 = intel_de_read(dev_priv,
3580 							 ICL_DPLL_CFGCR0(id));
3581 			hw_state->cfgcr1 = intel_de_read(dev_priv,
3582 							 ICL_DPLL_CFGCR1(id));
3583 		}
3584 	}
3585 
3586 	ret = true;
3587 out:
3588 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
3589 	return ret;
3590 }
3591 
3592 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3593 				   struct intel_shared_dpll *pll,
3594 				   struct intel_dpll_hw_state *hw_state)
3595 {
3596 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3597 
3598 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3599 }
3600 
3601 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3602 				 struct intel_shared_dpll *pll,
3603 				 struct intel_dpll_hw_state *hw_state)
3604 {
3605 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3606 }
3607 
3608 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3609 			   struct intel_shared_dpll *pll)
3610 {
3611 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3612 	const enum intel_dpll_id id = pll->info->id;
3613 	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;
3614 
3615 	if (IS_ALDERLAKE_S(dev_priv)) {
3616 		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
3617 		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
3618 	} else if (IS_DG1(dev_priv)) {
3619 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3620 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3621 	} else if (IS_ROCKETLAKE(dev_priv)) {
3622 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3623 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3624 	} else if (DISPLAY_VER(dev_priv) >= 12) {
3625 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3626 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3627 		div0_reg = TGL_DPLL0_DIV0(id);
3628 	} else {
3629 		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3630 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3631 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3632 		} else {
3633 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3634 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3635 		}
3636 	}
3637 
3638 	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3639 	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3640 	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
3641 			 !i915_mmio_reg_valid(div0_reg));
3642 	if (dev_priv->display.vbt.override_afc_startup &&
3643 	    i915_mmio_reg_valid(div0_reg))
3644 		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
3645 			     hw_state->div0);
3646 	intel_de_posting_read(dev_priv, cfgcr1_reg);
3647 }
3648 
3649 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3650 			     struct intel_shared_dpll *pll)
3651 {
3652 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3653 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3654 	u32 val;
3655 
3656 	/*
3657 	 * Some of the following registers have reserved fields, so program
3658 	 * these with RMW based on a mask. The mask can be fixed or generated
3659 	 * during the calc/readout phase if the mask depends on some other HW
3660 	 * state like refclk, see icl_calc_mg_pll_state().
3661 	 */
3662 	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3663 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3664 	val |= hw_state->mg_refclkin_ctl;
3665 	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3666 
3667 	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3668 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3669 	val |= hw_state->mg_clktop2_coreclkctl1;
3670 	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3671 
3672 	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3673 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3674 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3675 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3676 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3677 	val |= hw_state->mg_clktop2_hsclkctl;
3678 	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
3679 
3680 	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3681 	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3682 	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3683 	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3684 		       hw_state->mg_pll_frac_lock);
3685 	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
3686 
3687 	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3688 	val &= ~hw_state->mg_pll_bias_mask;
3689 	val |= hw_state->mg_pll_bias;
3690 	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
3691 
3692 	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3693 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3694 	val |= hw_state->mg_pll_tdc_coldst_bias;
3695 	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3696 
3697 	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3698 }
3699 
3700 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3701 			  struct intel_shared_dpll *pll)
3702 {
3703 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3704 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3705 	u32 val;
3706 
3707 	/*
3708 	 * All registers programmed here have the same HIP_INDEX_REG even
3709 	 * though they are on different building blocks
3710 	 */
3711 	/* All the registers are RMW */
3712 	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3713 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3714 	val |= hw_state->mg_refclkin_ctl;
3715 	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3716 
3717 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3718 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3719 	val |= hw_state->mg_clktop2_coreclkctl1;
3720 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3721 
3722 	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3723 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3724 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3725 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3726 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3727 	val |= hw_state->mg_clktop2_hsclkctl;
3728 	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3729 
3730 	val = DKL_PLL_DIV0_MASK;
3731 	if (dev_priv->display.vbt.override_afc_startup)
3732 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
3733 	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
3734 			  hw_state->mg_pll_div0);
3735 
3736 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
3737 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3738 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3739 	val |= hw_state->mg_pll_div1;
3740 	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3741 
3742 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
3743 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3744 		 DKL_PLL_SSC_STEP_LEN_MASK |
3745 		 DKL_PLL_SSC_STEP_NUM_MASK |
3746 		 DKL_PLL_SSC_EN);
3747 	val |= hw_state->mg_pll_ssc;
3748 	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3749 
3750 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
3751 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3752 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3753 	val |= hw_state->mg_pll_bias;
3754 	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3755 
3756 	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3757 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3758 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3759 	val |= hw_state->mg_pll_tdc_coldst_bias;
3760 	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3761 
3762 	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3763 }
3764 
3765 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3766 				 struct intel_shared_dpll *pll,
3767 				 i915_reg_t enable_reg)
3768 {
3769 	u32 val;
3770 
3771 	val = intel_de_read(dev_priv, enable_reg);
3772 	val |= PLL_POWER_ENABLE;
3773 	intel_de_write(dev_priv, enable_reg, val);
3774 
3775 	/*
3776 	 * The spec says we need to "wait" but it also says it should be
3777 	 * immediate.
3778 	 */
3779 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3780 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3781 			pll->info->id);
3782 }
3783 
3784 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3785 			   struct intel_shared_dpll *pll,
3786 			   i915_reg_t enable_reg)
3787 {
3788 	u32 val;
3789 
3790 	val = intel_de_read(dev_priv, enable_reg);
3791 	val |= PLL_ENABLE;
3792 	intel_de_write(dev_priv, enable_reg, val);
3793 
3794 	/* Timeout is actually 600us. */
3795 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3796 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3797 }
3798 
3799 static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
3800 {
3801 	u32 val;
3802 
3803 	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
3804 	    pll->info->id != DPLL_ID_ICL_DPLL0)
3805 		return;
3806 	/*
3807 	 * Wa_16011069516:adl-p[a0]
3808 	 *
3809 	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
3810 	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
3811 	 * sanity check this assumption with a double read, which presumably
3812 	 * returns the correct value even with clock gating on.
3813 	 *
3814 	 * Instead of the usual place for workarounds we apply this one here,
3815 	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
3816 	 */
3817 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3818 	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
3819 	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
3820 	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
3821 		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
3822 }
3823 
3824 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3825 			     struct intel_shared_dpll *pll)
3826 {
3827 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3828 
3829 	if (IS_JSL_EHL(dev_priv) &&
3830 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3832 		/*
3833 		 * We need to disable DC states when this DPLL is enabled.
3834 		 * This can be done by taking a reference on DPLL4 power
3835 		 * domain.
3836 		 */
3837 		pll->wakeref = intel_display_power_get(dev_priv,
3838 						       POWER_DOMAIN_DC_OFF);
3839 	}
3840 
3841 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3842 
3843 	icl_dpll_write(dev_priv, pll);
3844 
3845 	/*
3846 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3847 	 * paths should already be setting the appropriate voltage, hence we do
3848 	 * nothing here.
3849 	 */
3850 
3851 	icl_pll_enable(dev_priv, pll, enable_reg);
3852 
3853 	adlp_cmtg_clock_gating_wa(dev_priv, pll);
3854 
3855 	/* DVFS post sequence would be here. See the comment above. */
3856 }
3857 
3858 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3859 			   struct intel_shared_dpll *pll)
3860 {
3861 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3862 
3863 	icl_dpll_write(dev_priv, pll);
3864 
3865 	/*
3866 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3867 	 * paths should already be setting the appropriate voltage, hence we do
3868 	 * nothing here.
3869 	 */
3870 
3871 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3872 
3873 	/* DVFS post sequence would be here. See the comment above. */
3874 }
3875 
3876 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3877 			  struct intel_shared_dpll *pll)
3878 {
3879 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3880 
3881 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3882 
3883 	if (DISPLAY_VER(dev_priv) >= 12)
3884 		dkl_pll_write(dev_priv, pll);
3885 	else
3886 		icl_mg_pll_write(dev_priv, pll);
3887 
3888 	/*
3889 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3890 	 * paths should already be setting the appropriate voltage, hence we do
3891 	 * nothing here.
3892 	 */
3893 
3894 	icl_pll_enable(dev_priv, pll, enable_reg);
3895 
3896 	/* DVFS post sequence would be here. See the comment above. */
3897 }
3898 
3899 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3900 			    struct intel_shared_dpll *pll,
3901 			    i915_reg_t enable_reg)
3902 {
3903 	u32 val;
3904 
3905 	/* The first steps are done by intel_ddi_post_disable(). */
3906 
3907 	/*
3908 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3909 	 * paths should already be setting the appropriate voltage, hence we do
3910 	 * nothing here.
3911 	 */
3912 
3913 	val = intel_de_read(dev_priv, enable_reg);
3914 	val &= ~PLL_ENABLE;
3915 	intel_de_write(dev_priv, enable_reg, val);
3916 
3917 	/* Timeout is actually 1us. */
3918 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3919 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3920 
3921 	/* DVFS post sequence would be here. See the comment above. */
3922 
3923 	val = intel_de_read(dev_priv, enable_reg);
3924 	val &= ~PLL_POWER_ENABLE;
3925 	intel_de_write(dev_priv, enable_reg, val);
3926 
3927 	/*
3928 	 * The spec says we need to "wait" but it also says it should be
3929 	 * immediate.
3930 	 */
3931 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3932 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3933 			pll->info->id);
3934 }
3935 
3936 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3937 			      struct intel_shared_dpll *pll)
3938 {
3939 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3940 
3941 	icl_pll_disable(dev_priv, pll, enable_reg);
3942 
3943 	if (IS_JSL_EHL(dev_priv) &&
3944 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3945 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3946 					pll->wakeref);
3947 }
3948 
3949 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3950 			    struct intel_shared_dpll *pll)
3951 {
3952 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3953 }
3954 
3955 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3956 			   struct intel_shared_dpll *pll)
3957 {
3958 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3959 
3960 	icl_pll_disable(dev_priv, pll, enable_reg);
3961 }
3962 
3963 static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
3964 {
3965 	/* No SSC ref */
3966 	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3967 }
3968 
3969 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3970 			      const struct intel_dpll_hw_state *hw_state)
3971 {
3972 	drm_dbg_kms(&dev_priv->drm,
3973 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3974 		    "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
3975 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3976 		    "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
3977 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3978 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3979 		    hw_state->cfgcr0, hw_state->cfgcr1,
3980 		    hw_state->div0,
3981 		    hw_state->mg_refclkin_ctl,
3982 		    hw_state->mg_clktop2_coreclkctl1,
3983 		    hw_state->mg_clktop2_hsclkctl,
3984 		    hw_state->mg_pll_div0,
3985 		    hw_state->mg_pll_div1,
3986 		    hw_state->mg_pll_lf,
3987 		    hw_state->mg_pll_frac_lock,
3988 		    hw_state->mg_pll_ssc,
3989 		    hw_state->mg_pll_bias,
3990 		    hw_state->mg_pll_tdc_coldst_bias);
3991 }
3992 
3993 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3994 	.enable = combo_pll_enable,
3995 	.disable = combo_pll_disable,
3996 	.get_hw_state = combo_pll_get_hw_state,
3997 	.get_freq = icl_ddi_combo_pll_get_freq,
3998 };
3999 
4000 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
4001 	.enable = tbt_pll_enable,
4002 	.disable = tbt_pll_disable,
4003 	.get_hw_state = tbt_pll_get_hw_state,
4004 	.get_freq = icl_ddi_tbt_pll_get_freq,
4005 };
4006 
4007 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
4008 	.enable = mg_pll_enable,
4009 	.disable = mg_pll_disable,
4010 	.get_hw_state = mg_pll_get_hw_state,
4011 	.get_freq = icl_ddi_mg_pll_get_freq,
4012 };
4013 
4014 static const struct dpll_info icl_plls[] = {
4015 	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4016 	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4017 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4018 	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4019 	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4020 	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4021 	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4022 	{ },
4023 };
4024 
4025 static const struct intel_dpll_mgr icl_pll_mgr = {
4026 	.dpll_info = icl_plls,
4027 	.compute_dplls = icl_compute_dplls,
4028 	.get_dplls = icl_get_dplls,
4029 	.put_dplls = icl_put_dplls,
4030 	.update_active_dpll = icl_update_active_dpll,
4031 	.update_ref_clks = icl_update_dpll_ref_clks,
4032 	.dump_hw_state = icl_dump_hw_state,
4033 };
4034 
4035 static const struct dpll_info ehl_plls[] = {
4036 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4037 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4038 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4039 	{ },
4040 };
4041 
4042 static const struct intel_dpll_mgr ehl_pll_mgr = {
4043 	.dpll_info = ehl_plls,
4044 	.compute_dplls = icl_compute_dplls,
4045 	.get_dplls = icl_get_dplls,
4046 	.put_dplls = icl_put_dplls,
4047 	.update_ref_clks = icl_update_dpll_ref_clks,
4048 	.dump_hw_state = icl_dump_hw_state,
4049 };
4050 
4051 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
4052 	.enable = mg_pll_enable,
4053 	.disable = mg_pll_disable,
4054 	.get_hw_state = dkl_pll_get_hw_state,
4055 	.get_freq = icl_ddi_mg_pll_get_freq,
4056 };
4057 
4058 static const struct dpll_info tgl_plls[] = {
4059 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4060 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4061 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4062 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4063 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4064 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4065 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4066 	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
4067 	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
4068 	{ },
4069 };
4070 
4071 static const struct intel_dpll_mgr tgl_pll_mgr = {
4072 	.dpll_info = tgl_plls,
4073 	.compute_dplls = icl_compute_dplls,
4074 	.get_dplls = icl_get_dplls,
4075 	.put_dplls = icl_put_dplls,
4076 	.update_active_dpll = icl_update_active_dpll,
4077 	.update_ref_clks = icl_update_dpll_ref_clks,
4078 	.dump_hw_state = icl_dump_hw_state,
4079 };
4080 
4081 static const struct dpll_info rkl_plls[] = {
4082 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4083 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4084 	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
4085 	{ },
4086 };
4087 
4088 static const struct intel_dpll_mgr rkl_pll_mgr = {
4089 	.dpll_info = rkl_plls,
4090 	.compute_dplls = icl_compute_dplls,
4091 	.get_dplls = icl_get_dplls,
4092 	.put_dplls = icl_put_dplls,
4093 	.update_ref_clks = icl_update_dpll_ref_clks,
4094 	.dump_hw_state = icl_dump_hw_state,
4095 };
4096 
4097 static const struct dpll_info dg1_plls[] = {
4098 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
4099 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
4100 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4101 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4102 	{ },
4103 };
4104 
4105 static const struct intel_dpll_mgr dg1_pll_mgr = {
4106 	.dpll_info = dg1_plls,
4107 	.compute_dplls = icl_compute_dplls,
4108 	.get_dplls = icl_get_dplls,
4109 	.put_dplls = icl_put_dplls,
4110 	.update_ref_clks = icl_update_dpll_ref_clks,
4111 	.dump_hw_state = icl_dump_hw_state,
4112 };
4113 
4114 static const struct dpll_info adls_plls[] = {
4115 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
4116 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4117 	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
4118 	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
4119 	{ },
4120 };
4121 
4122 static const struct intel_dpll_mgr adls_pll_mgr = {
4123 	.dpll_info = adls_plls,
4124 	.compute_dplls = icl_compute_dplls,
4125 	.get_dplls = icl_get_dplls,
4126 	.put_dplls = icl_put_dplls,
4127 	.update_ref_clks = icl_update_dpll_ref_clks,
4128 	.dump_hw_state = icl_dump_hw_state,
4129 };
4130 
4131 static const struct dpll_info adlp_plls[] = {
4132 	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
4133 	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
4134 	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4135 	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
4136 	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
4137 	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
4138 	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4139 	{ },
4140 };
4141 
4142 static const struct intel_dpll_mgr adlp_pll_mgr = {
4143 	.dpll_info = adlp_plls,
4144 	.compute_dplls = icl_compute_dplls,
4145 	.get_dplls = icl_get_dplls,
4146 	.put_dplls = icl_put_dplls,
4147 	.update_active_dpll = icl_update_active_dpll,
4148 	.update_ref_clks = icl_update_dpll_ref_clks,
4149 	.dump_hw_state = icl_dump_hw_state,
4150 };
4151 
4152 /**
4153  * intel_shared_dpll_init - Initialize shared DPLLs
4154  * @dev_priv: i915 device
4155  *
4156  * Initialize shared DPLLs for @dev_priv.
4157  */
4158 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4159 {
4160 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4161 	const struct dpll_info *dpll_info;
4162 	int i;
4163 
4164 	mutex_init(&dev_priv->display.dpll.lock);
4165 
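	/*
	 * Pick the platform specific DPLL manager. Order matters: the more
	 * specific platform checks must come before the generic
	 * DISPLAY_VER() fallbacks.
	 */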
4166 	if (IS_DG2(dev_priv))
4167 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4168 		dpll_mgr = NULL;
4169 	else if (IS_ALDERLAKE_P(dev_priv))
4170 		dpll_mgr = &adlp_pll_mgr;
4171 	else if (IS_ALDERLAKE_S(dev_priv))
4172 		dpll_mgr = &adls_pll_mgr;
4173 	else if (IS_DG1(dev_priv))
4174 		dpll_mgr = &dg1_pll_mgr;
4175 	else if (IS_ROCKETLAKE(dev_priv))
4176 		dpll_mgr = &rkl_pll_mgr;
4177 	else if (DISPLAY_VER(dev_priv) >= 12)
4178 		dpll_mgr = &tgl_pll_mgr;
4179 	else if (IS_JSL_EHL(dev_priv))
4180 		dpll_mgr = &ehl_pll_mgr;
4181 	else if (DISPLAY_VER(dev_priv) >= 11)
4182 		dpll_mgr = &icl_pll_mgr;
4183 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4184 		dpll_mgr = &bxt_pll_mgr;
4185 	else if (DISPLAY_VER(dev_priv) == 9)
4186 		dpll_mgr = &skl_pll_mgr;
4187 	else if (HAS_DDI(dev_priv))
4188 		dpll_mgr = &hsw_pll_mgr;
4189 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4190 		dpll_mgr = &pch_pll_mgr;
4191 
4192 	if (!dpll_mgr) {
4193 		dev_priv->display.dpll.num_shared_dpll = 0;
4194 		return;
4195 	}
4196 
4197 	dpll_info = dpll_mgr->dpll_info;
4198 
4199 	for (i = 0; dpll_info[i].name; i++) {
4200 		if (drm_WARN_ON(&dev_priv->drm,
4201 				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4202 			break;
4203 
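		/* The PLL table index is expected to match the DPLL ID. */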
4204 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4205 		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4206 	}
4207 
4208 	dev_priv->display.dpll.mgr = dpll_mgr;
4209 	dev_priv->display.dpll.num_shared_dpll = i;
4210 }
4211 
4212 /**
4213  * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
4214  * @state: atomic state
4215  * @crtc: CRTC to compute DPLLs for
4216  * @encoder: encoder
4217  *
4218  * This function computes the DPLL state for the given CRTC and encoder.
4219  *
4220  * The new configuration in the atomic commit @state is made effective by
4221  * calling intel_shared_dpll_swap_state().
4222  *
4223  * Returns:
4224  * 0 on success, negative error code on failure.
4225  */
4226 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4227 			       struct intel_crtc *crtc,
4228 			       struct intel_encoder *encoder)
4229 {
4230 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4231 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4232 
4233 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4234 		return -EINVAL;
4235 
4236 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4237 }
4238 
4239 /**
4240  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4241  * @state: atomic state
4242  * @crtc: CRTC to reserve DPLLs for
4243  * @encoder: encoder
4244  *
4245  * This function reserves all required DPLLs for the given CRTC and encoder
4246  * combination in the current atomic commit @state and the new @crtc atomic
4247  * state.
4248  *
4249  * The new configuration in the atomic commit @state is made effective by
4250  * calling intel_shared_dpll_swap_state().
4251  *
4252  * The reserved DPLLs should be released by calling
4253  * intel_release_shared_dplls().
4254  *
4255  * Returns:
4256  * 0 if all required DPLLs were successfully reserved,
4257  * negative error code otherwise.
4258  */
4259 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4260 			       struct intel_crtc *crtc,
4261 			       struct intel_encoder *encoder)
4262 {
4263 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4264 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4265 
4266 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4267 		return -EINVAL;
4268 
4269 	return dpll_mgr->get_dplls(state, crtc, encoder);
4270 }
4271 
4272 /**
4273  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4274  * @state: atomic state
4275  * @crtc: CRTC from which the DPLLs are to be released
4276  *
4277  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4278  * from the current atomic commit @state and the old @crtc atomic state.
4279  *
4280  * The new configuration in the atomic commit @state is made effective by
4281  * calling intel_shared_dpll_swap_state().
4282  */
4283 void intel_release_shared_dplls(struct intel_atomic_state *state,
4284 				struct intel_crtc *crtc)
4285 {
4286 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4287 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4288 
4289 	/*
4290 	 * FIXME: this function is called for every platform having a
4291 	 * compute_clock hook, even though the platform doesn't yet support
4292 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4293 	 * called on those.
4294 	 */
4295 	if (!dpll_mgr)
4296 		return;
4297 
4298 	dpll_mgr->put_dplls(state, crtc);
4299 }
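
/*
 * Usage sketch (illustrative only, not part of the driver): the helpers
 * above are meant to be driven from the atomic check and commit paths.
 * The function below is hypothetical; in the real driver the equivalent
 * calls live in the platform's CRTC clock computation and modeset code.
 *
 *	static int example_crtc_compute_clock(struct intel_atomic_state *state,
 *					      struct intel_crtc *crtc,
 *					      struct intel_encoder *encoder)
 *	{
 *		int ret;
 *
 *		ret = intel_compute_shared_dplls(state, crtc, encoder);
 *		if (ret)
 *			return ret;
 *
 *		return intel_reserve_shared_dplls(state, crtc, encoder);
 *	}
 *
 * The staged reservations become effective when the commit phase calls
 * intel_shared_dpll_swap_state(), and intel_release_shared_dplls() drops
 * them again once the CRTC stops using the PLL.
 */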
4300 
4301 /**
4302  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4303  * @state: atomic state
4304  * @crtc: the CRTC for which to update the active DPLL
4305  * @encoder: encoder determining the type of port DPLL
4306  *
4307  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4308  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4309  * DPLL selected will be based on the current mode of the encoder's port.
4310  */
4311 void intel_update_active_dpll(struct intel_atomic_state *state,
4312 			      struct intel_crtc *crtc,
4313 			      struct intel_encoder *encoder)
4314 {
4315 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4316 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4317 
4318 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4319 		return;
4320 
4321 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4322 }
4323 
4324 /**
4325  * intel_dpll_get_freq - calculate the DPLL's output frequency
4326  * @i915: i915 device
4327  * @pll: DPLL for which to calculate the output frequency
4328  * @pll_state: DPLL state from which to calculate the output frequency
4329  *
4330  * Return the output frequency corresponding to @pll's passed in @pll_state.
4331  */
4332 int intel_dpll_get_freq(struct drm_i915_private *i915,
4333 			const struct intel_shared_dpll *pll,
4334 			const struct intel_dpll_hw_state *pll_state)
4335 {
4336 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4337 		return 0;
4338 
4339 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4340 }
4341 
4342 /**
4343  * intel_dpll_get_hw_state - read out the DPLL's hardware state
4344  * @i915: i915 device
4345  * @pll: DPLL for which to read out the hardware state
4346  * @hw_state: DPLL's hardware state
4347  *
4348  * Read out @pll's hardware state into @hw_state.
4349  */
4350 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4351 			     struct intel_shared_dpll *pll,
4352 			     struct intel_dpll_hw_state *hw_state)
4353 {
4354 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4355 }
4356 
4357 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4358 				  struct intel_shared_dpll *pll)
4359 {
4360 	struct intel_crtc *crtc;
4361 
4362 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4363 
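	/*
	 * If DPLL4 is already enabled at readout time, take the DC_OFF
	 * power reference that combo_pll_enable() would have taken, so
	 * that combo_pll_disable() can release it later.
	 */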
4364 	if (IS_JSL_EHL(i915) && pll->on &&
4365 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4366 		pll->wakeref = intel_display_power_get(i915,
4367 						       POWER_DOMAIN_DC_OFF);
4368 	}
4369 
4370 	pll->state.pipe_mask = 0;
4371 	for_each_intel_crtc(&i915->drm, crtc) {
4372 		struct intel_crtc_state *crtc_state =
4373 			to_intel_crtc_state(crtc->base.state);
4374 
4375 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4376 			pll->state.pipe_mask |= BIT(crtc->pipe);
4377 	}
4378 	pll->active_mask = pll->state.pipe_mask;
4379 
4380 	drm_dbg_kms(&i915->drm,
4381 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4382 		    pll->info->name, pll->state.pipe_mask, pll->on);
4383 }
4384 
4385 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4386 {
4387 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4388 		i915->display.dpll.mgr->update_ref_clks(i915);
4389 }
4390 
4391 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4392 {
4393 	int i;
4394 
4395 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4396 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4397 }
4398 
4399 static void sanitize_dpll_state(struct drm_i915_private *i915,
4400 				struct intel_shared_dpll *pll)
4401 {
4402 	if (!pll->on)
4403 		return;
4404 
4405 	adlp_cmtg_clock_gating_wa(i915, pll);
4406 
4407 	if (pll->active_mask)
4408 		return;
4409 
4410 	drm_dbg_kms(&i915->drm,
4411 		    "%s enabled but not in use, disabling\n",
4412 		    pll->info->name);
4413 
4414 	pll->info->funcs->disable(i915, pll);
4415 	pll->on = false;
4416 }
4417 
4418 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4419 {
4420 	int i;
4421 
4422 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4423 		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4424 }
4425 
4426 /**
4427  * intel_dpll_dump_hw_state - write hw_state to dmesg
4428  * @dev_priv: i915 drm device
4429  * @hw_state: hw state to be written to the log
4430  *
4431  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4432  */
4433 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4434 			      const struct intel_dpll_hw_state *hw_state)
4435 {
4436 	if (dev_priv->display.dpll.mgr) {
4437 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4438 	} else {
4439 		/* fallback for platforms that don't use the shared dpll
4440 		 * infrastructure
4441 		 */
4442 		drm_dbg_kms(&dev_priv->drm,
4443 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4444 			    "fp0: 0x%x, fp1: 0x%x\n",
4445 			    hw_state->dpll,
4446 			    hw_state->dpll_md,
4447 			    hw_state->fp0,
4448 			    hw_state->fp1);
4449 	}
4450 }
4451 
4452 static void
4453 verify_single_dpll_state(struct drm_i915_private *dev_priv,
4454 			 struct intel_shared_dpll *pll,
4455 			 struct intel_crtc *crtc,
4456 			 struct intel_crtc_state *new_crtc_state)
4457 {
4458 	struct intel_dpll_hw_state dpll_hw_state;
4459 	u8 pipe_mask;
4460 	bool active;
4461 
4462 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
4463 
4464 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
4465 
4466 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
4467 
4468 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4469 		I915_STATE_WARN(!pll->on && pll->active_mask,
4470 				"pll in active use but not on in sw tracking\n");
4471 		I915_STATE_WARN(pll->on && !pll->active_mask,
4472 				"pll is on but not used by any active pipe\n");
4473 		I915_STATE_WARN(pll->on != active,
4474 				"pll on state mismatch (expected %i, found %i)\n",
4475 				pll->on, active);
4476 	}
4477 
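	/*
	 * With a NULL crtc (see intel_shared_dpll_verify_disabled()) only
	 * the PLL's global bookkeeping is checked.
	 */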
4478 	if (!crtc) {
4479 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
4480 				"more active pll users than references: 0x%x vs 0x%x\n",
4481 				pll->active_mask, pll->state.pipe_mask);
4482 
4483 		return;
4484 	}
4485 
4486 	pipe_mask = BIT(crtc->pipe);
4487 
4488 	if (new_crtc_state->hw.active)
4489 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
4490 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
4491 				pipe_name(crtc->pipe), pll->active_mask);
4492 	else
4493 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4494 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
4495 				pipe_name(crtc->pipe), pll->active_mask);
4496 
4497 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
4498 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
4499 			pipe_mask, pll->state.pipe_mask);
4500 
4501 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
4502 					  &dpll_hw_state,
4503 					  sizeof(dpll_hw_state)),
4504 			"pll hw state mismatch\n");
4505 }
4506 
4507 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4508 				    struct intel_crtc_state *old_crtc_state,
4509 				    struct intel_crtc_state *new_crtc_state)
4510 {
4511 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4512 
4513 	if (new_crtc_state->shared_dpll)
4514 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4515 					 crtc, new_crtc_state);
4516 
4517 	if (old_crtc_state->shared_dpll &&
4518 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4519 		u8 pipe_mask = BIT(crtc->pipe);
4520 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4521 
4522 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4523 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4524 				pipe_name(crtc->pipe), pll->active_mask);
4525 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
4526 				"pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
4527 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4528 	}
4529 }
4530 
4531 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4532 {
4533 	int i;
4534 
4535 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4536 		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4537 					 NULL, NULL);
4538 }
4539