1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_de.h"
25 #include "intel_display_types.h"
26 #include "intel_dpio_phy.h"
27 #include "intel_dpll.h"
28 #include "intel_dpll_mgr.h"
29 #include "intel_pch_refclk.h"
30 #include "intel_tc.h"
31 #include "intel_tc_phy_regs.h"
32 
33 /**
34  * DOC: Display PLLs
35  *
36  * Display PLLs used for driving outputs vary by platform. While some have
37  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
38  * from a pool. In the latter scenario, it is possible that multiple pipes
39  * share a PLL if their configurations match.
40  *
41  * This file provides an abstraction over display PLLs. The function
42  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
43  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
45  * for a given CRTC and encoder configuration by calling
46  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
47  * with intel_release_shared_dplls().
48  * Changes to the users are first staged in the atomic state, and then made
49  * effective by calling intel_shared_dpll_swap_state() during the atomic
50  * commit phase.
51  */
52 
/*
 * Platform-specific hooks for managing DPLLs. Each PLL's dpll_info points
 * at one of these vtables; callers go through pll->info->funcs.
 */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true if the PLL is enabled.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. The implementations in this file return kHz.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
87 
/*
 * Per-platform DPLL manager: the table of available PLLs plus the hooks
 * that implement reservation/release for the atomic modeset path.
 */
struct intel_dpll_mgr {
	/* sentinel-terminated table of PLLs available on this platform */
	const struct dpll_info *dpll_info;

	/* reserve the PLLs needed by @crtc/@encoder in the atomic state */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the PLLs previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* may be NULL (e.g. the PCH manager below does not provide it) */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* may be NULL; refreshes cached reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log the contents of @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
103 
104 static void
105 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
106 				  struct intel_shared_dpll_state *shared_dpll)
107 {
108 	enum intel_dpll_id i;
109 
110 	/* Copy shared dpll state */
111 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
112 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
113 
114 		shared_dpll[i] = pll->state;
115 	}
116 }
117 
118 static struct intel_shared_dpll_state *
119 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
120 {
121 	struct intel_atomic_state *state = to_intel_atomic_state(s);
122 
123 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
124 
125 	if (!state->dpll_set) {
126 		state->dpll_set = true;
127 
128 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
129 						  state->shared_dpll);
130 	}
131 
132 	return state->shared_dpll;
133 }
134 
/**
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @dev_priv: i915 device instance
 * @id: pll id
 *
 * Returns:
 * A pointer to the DPLL with @id
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id)
{
	/* NOTE(review): no range check on @id — callers must pass a valid id. */
	return &dev_priv->dpll.shared_dplls[id];
}
149 
150 /**
151  * intel_get_shared_dpll_id - get the id of a DPLL
152  * @dev_priv: i915 device instance
153  * @pll: the DPLL
154  *
155  * Returns:
156  * The id of @pll
157  */
158 enum intel_dpll_id
159 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
160 			 struct intel_shared_dpll *pll)
161 {
162 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
163 
164 	if (drm_WARN_ON(&dev_priv->drm,
165 			pll_idx < 0 ||
166 			pll_idx >= dev_priv->dpll.num_shared_dpll))
167 		return -1;
168 
169 	return pll_idx;
170 }
171 
/* For ILK+ */
/*
 * Assert that @pll's hardware enable state matches @state, warning (not
 * failing hard) on mismatch or when called with a NULL pll.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* get_hw_state returns whether the PLL is currently enabled */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
189 
190 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
191 {
192 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
193 }
194 
195 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
196 {
197 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
198 }
199 
200 static i915_reg_t
201 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
202 			   struct intel_shared_dpll *pll)
203 {
204 	if (IS_DG1(i915))
205 		return DG1_DPLL_ENABLE(pll->info->id);
206 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
207 		return MG_PLL_ENABLE(0);
208 
209 	return ICL_DPLL_ENABLE(pll->info->id);
210 }
211 
212 static i915_reg_t
213 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
214 			struct intel_shared_dpll *pll)
215 {
216 	const enum intel_dpll_id id = pll->info->id;
217 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
218 
219 	if (IS_ALDERLAKE_P(i915))
220 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
221 
222 	return MG_PLL_ENABLE(tc_port);
223 }
224 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc. The PLL is only programmed in
 * hardware for the first active pipe; subsequent pipes just join the
 * active mask.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	old_mask = pll->active_mask;

	/*
	 * The pipe must hold a reference on the PLL (state.pipe_mask) and
	 * must not already be marked active.
	 */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/* Some other pipe already enabled the PLL in hardware. */
	if (old_mask) {
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First active user: actually turn the PLL on. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
270 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc. The PLL is only turned off in
 * hardware once the last active pipe has dropped out of the active mask.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* Warn (and bail) if this pipe was never marked active on the PLL. */
	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	pll->active_mask &= ~pipe_mask;
	/* Other pipes still use the PLL: leave the hardware alone. */
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
316 
/*
 * Find a PLL from @dpll_mask that can serve @crtc: prefer an already-used
 * PLL whose staged hw state matches @pll_state exactly (sharing), otherwise
 * fall back to the first unused PLL. Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Exact hw-state match means the PLL can be shared. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
365 
366 static void
367 intel_reference_shared_dpll(struct intel_atomic_state *state,
368 			    const struct intel_crtc *crtc,
369 			    const struct intel_shared_dpll *pll,
370 			    const struct intel_dpll_hw_state *pll_state)
371 {
372 	struct drm_i915_private *i915 = to_i915(state->base.dev);
373 	struct intel_shared_dpll_state *shared_dpll;
374 	const enum intel_dpll_id id = pll->info->id;
375 
376 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
377 
378 	if (shared_dpll[id].pipe_mask == 0)
379 		shared_dpll[id].hw_state = *pll_state;
380 
381 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
382 		pipe_name(crtc->pipe));
383 
384 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
385 }
386 
387 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
388 					  const struct intel_crtc *crtc,
389 					  const struct intel_shared_dpll *pll)
390 {
391 	struct intel_shared_dpll_state *shared_dpll;
392 
393 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
394 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
395 }
396 
397 static void intel_put_dpll(struct intel_atomic_state *state,
398 			   struct intel_crtc *crtc)
399 {
400 	const struct intel_crtc_state *old_crtc_state =
401 		intel_atomic_get_old_crtc_state(state, crtc);
402 	struct intel_crtc_state *new_crtc_state =
403 		intel_atomic_get_new_crtc_state(state, crtc);
404 
405 	new_crtc_state->shared_dpll = NULL;
406 
407 	if (!old_crtc_state->shared_dpll)
408 		return;
409 
410 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
411 }
412 
413 /**
414  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
415  * @state: atomic state
416  *
417  * This is the dpll version of drm_atomic_helper_swap_state() since the
418  * helper does not handle driver-specific global state.
419  *
420  * For consistency with atomic helpers this function does a complete swap,
421  * i.e. it also puts the current state into @state, even though there is no
422  * need for that at this moment.
423  */
424 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
425 {
426 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
427 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
428 	enum intel_dpll_id i;
429 
430 	if (!state->dpll_set)
431 		return;
432 
433 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
434 		struct intel_shared_dpll *pll =
435 			&dev_priv->dpll.shared_dplls[i];
436 
437 		swap(pll->state, shared_dpll[i]);
438 	}
439 }
440 
/*
 * Read back the PCH DPLL, FP0 and FP1 registers into @hw_state.
 * Returns true if the PLL is enabled; false also when display power
 * is off (in which case @hw_state is not read).
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Don't touch the registers unless display power is already up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
463 
/*
 * Warn if the PCH reference clock (any of the SSC/non-spread/superspread
 * sources in PCH_DREF_CONTROL) is not enabled. Only meaningful on
 * IBX/CPT PCHs, which is also asserted.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
476 
/*
 * Program and enable a PCH DPLL from pll->state.hw_state, following the
 * required sequence: FP dividers first, then the DPLL register, then a
 * second DPLL write after the clocks have stabilized (for the pixel
 * multiplier).
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
503 
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	/* Flush the write, then let the PLL spin down. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
513 
/*
 * Reserve a PCH DPLL for @crtc. On IBX the mapping is fixed per pipe;
 * on CPT any of the two PCH PLLs can be used/shared. Returns false if
 * no PLL could be reserved.
 */
static bool ibx_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return false;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
551 
/* Dump the IBX/PCH-relevant fields of @hw_state to the KMS debug log. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
563 
/* PCH DPLL hooks (IBX/CPT); no get_freq hook is provided here. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
569 
/* The two PCH DPLLs; terminated by an empty sentinel entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
575 
/* DPLL manager for PCH (ILK/SNB/IVB-era) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
582 
/* Program and enable a HSW/BDW WRPLL from the cached hw state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	/* Flush the write, then give the PLL a fixed delay to settle. */
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
592 
/* Program and enable the HSW/BDW SPLL from the cached hw state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	/* Flush the write, then give the PLL a fixed delay to settle. */
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
600 
/* Disable a WRPLL, then reconfigure the PCH refclk if this PLL used SSC. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
618 
/* Disable the SPLL, then reconfigure the PCH refclk if it used SSC. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
636 
/*
 * Read back WRPLL_CTL into @hw_state. Returns true if the PLL is
 * enabled; false also when display power is off.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Only read the register if display power is already up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
657 
/*
 * Read back SPLL_CTL into @hw_state. Returns true if the PLL is
 * enabled; false also when display power is off.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Only read the register if display power is already up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
677 
/* LC PLL reference frequency in MHz (2.7 GHz). */
#define LC_FREQ 2700
/* LC_FREQ expressed in the same units as freq2k below (clock in Hz / 100). */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider search range: even values from 2 to 64. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate WRPLL dividers: n2 and r2 are twice the actual N and R. */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
694 
/*
 * Return the PPM error budget allowed for the WRPLL when targeting the
 * given port clock (in Hz). Well-known display frequencies get a tuned
 * budget; anything else defaults to 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int budget_0[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int budget_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int budget_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int budget_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int budget_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		size_t count;
		unsigned budget;
	} groups[] = {
		{ budget_0, sizeof(budget_0) / sizeof(budget_0[0]), 0 },
		{ budget_1500, sizeof(budget_1500) / sizeof(budget_1500[0]), 1500 },
		{ budget_2000, sizeof(budget_2000) / sizeof(budget_2000[0]), 2000 },
		{ budget_4000, sizeof(budget_4000) / sizeof(budget_4000[0]), 4000 },
		{ budget_5000, sizeof(budget_5000) / sizeof(budget_5000[0]), 5000 },
	};
	size_t i, j;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		for (j = 0; j < groups[i].count; j++) {
			if (groups[i].clocks[j] == clock)
				return groups[i].budget;
		}
	}

	/* Unlisted frequencies get the default budget. */
	return 1000;
}
768 
/*
 * Compare the candidate dividers (r2, n2, p) against the current @best
 * for target frequency @freq2k, and record the candidate in @best if it
 * is preferable: within the PPM @budget prefer higher Ref * VCO, above
 * the budget prefer the smaller relative frequency error. All math is
 * done in 64-bit cross-multiplied form to avoid division.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	/* a >= c means candidate is within budget; b >= d means best is. */
	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
828 
/*
 * hsw_ddi_calculate_wrpll - compute WRPLL dividers for a target clock
 * @clock: target clock in Hz
 * @r2_out: returned reference divider, times 2
 * @n2_out: returned feedback divider, times 2
 * @p_out: returned post divider
 *
 * Exhaustively searches the (r2, n2, p) space allowed by the REF_MIN/MAX
 * and VCO_MIN/MAX constraints and picks the combination closest to @clock
 * within its PPM budget (via hsw_wrpll_update_rnp()).
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	/* freq2k is @clock in units of 100 Hz */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
893 
894 static struct intel_shared_dpll *
895 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
896 		       struct intel_crtc *crtc)
897 {
898 	struct intel_crtc_state *crtc_state =
899 		intel_atomic_get_new_crtc_state(state, crtc);
900 	struct intel_shared_dpll *pll;
901 	u32 val;
902 	unsigned int p, n2, r2;
903 
904 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
905 
906 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
907 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
908 	      WRPLL_DIVIDER_POST(p);
909 
910 	crtc_state->dpll_hw_state.wrpll = val;
911 
912 	pll = intel_find_shared_dpll(state, crtc,
913 				     &crtc_state->dpll_hw_state,
914 				     BIT(DPLL_ID_WRPLL2) |
915 				     BIT(DPLL_ID_WRPLL1));
916 
917 	if (!pll)
918 		return NULL;
919 
920 	return pll;
921 }
922 
/*
 * Calculate a WRPLL's output frequency (in kHz) from the programmed
 * WRPLL_CTL value in @pll_state, using the cached reference clocks.
 * Returns 0 for an unrecognized reference select.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the control register. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
962 
963 static struct intel_shared_dpll *
964 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
965 {
966 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
967 	struct intel_shared_dpll *pll;
968 	enum intel_dpll_id pll_id;
969 	int clock = crtc_state->port_clock;
970 
971 	switch (clock / 2) {
972 	case 81000:
973 		pll_id = DPLL_ID_LCPLL_810;
974 		break;
975 	case 135000:
976 		pll_id = DPLL_ID_LCPLL_1350;
977 		break;
978 	case 270000:
979 		pll_id = DPLL_ID_LCPLL_2700;
980 		break;
981 	default:
982 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
983 			    clock);
984 		return NULL;
985 	}
986 
987 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
988 
989 	if (!pll)
990 		return NULL;
991 
992 	return pll;
993 }
994 
995 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
996 				  const struct intel_shared_dpll *pll,
997 				  const struct intel_dpll_hw_state *pll_state)
998 {
999 	int link_clock = 0;
1000 
1001 	switch (pll->info->id) {
1002 	case DPLL_ID_LCPLL_810:
1003 		link_clock = 81000;
1004 		break;
1005 	case DPLL_ID_LCPLL_1350:
1006 		link_clock = 135000;
1007 		break;
1008 	case DPLL_ID_LCPLL_2700:
1009 		link_clock = 270000;
1010 		break;
1011 	default:
1012 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1013 		break;
1014 	}
1015 
1016 	return link_clock * 2;
1017 }
1018 
/*
 * Reserve the SPLL for an analog (CRT/FDI) output. Only a 270 MHz port
 * clock (135 MHz link) is supported; anything else warns and fails.
 */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return NULL;

	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
					 SPLL_REF_MUXED_SSC;

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
1035 
1036 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1037 				 const struct intel_shared_dpll *pll,
1038 				 const struct intel_dpll_hw_state *pll_state)
1039 {
1040 	int link_clock = 0;
1041 
1042 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1043 	case SPLL_FREQ_810MHz:
1044 		link_clock = 81000;
1045 		break;
1046 	case SPLL_FREQ_1350MHz:
1047 		link_clock = 135000;
1048 		break;
1049 	case SPLL_FREQ_2700MHz:
1050 		link_clock = 270000;
1051 		break;
1052 	default:
1053 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1054 		break;
1055 	}
1056 
1057 	return link_clock * 2;
1058 }
1059 
1060 static bool hsw_get_dpll(struct intel_atomic_state *state,
1061 			 struct intel_crtc *crtc,
1062 			 struct intel_encoder *encoder)
1063 {
1064 	struct intel_crtc_state *crtc_state =
1065 		intel_atomic_get_new_crtc_state(state, crtc);
1066 	struct intel_shared_dpll *pll;
1067 
1068 	memset(&crtc_state->dpll_hw_state, 0,
1069 	       sizeof(crtc_state->dpll_hw_state));
1070 
1071 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1072 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1073 	else if (intel_crtc_has_dp_encoder(crtc_state))
1074 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1075 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1076 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1077 	else
1078 		return false;
1079 
1080 	if (!pll)
1081 		return false;
1082 
1083 	intel_reference_shared_dpll(state, crtc,
1084 				    pll, &crtc_state->dpll_hw_state);
1085 
1086 	crtc_state->shared_dpll = pll;
1087 
1088 	return true;
1089 }
1090 
1091 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1092 {
1093 	i915->dpll.ref_clks.ssc = 135000;
1094 	/* Non-SSC is only used on non-ULT HSW. */
1095 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1096 		i915->dpll.ref_clks.nssc = 24000;
1097 	else
1098 		i915->dpll.ref_clks.nssc = 135000;
1099 }
1100 
/* Log the HSW-relevant PLL hw state (WRPLL and SPLL registers) for debug. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1107 
/* Hooks for the two programmable HSW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1114 
/* Hooks for the HSW SPLL (analog/CRT output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1121 
/*
 * The HSW LCPLLs are registered with INTEL_DPLL_ALWAYS_ON and run at
 * fixed rates, so there is nothing to program on enable.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1126 
/* Counterpart to hsw_ddi_lcpll_enable(): always-on PLL, nothing to do. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1131 
/*
 * The LCPLLs are always on and have no per-PLL state to read back, so
 * simply report them as enabled.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1138 
/* Hooks for the fixed-rate, always-on HSW LCPLLs (DP). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1145 
/* All shared PLLs available on HSW/BDW; the LCPLLs are always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1155 
/* HSW/BDW shared-PLL manager entry points. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1163 
/* Per-PLL register set (control + the two config registers) for SKL DPLLs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1167 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1194 
/*
 * Program this PLL's fields (HDMI mode, SSC, link rate) in the shared
 * DPLL_CTRL1 register. The ctrl1 value in the software state is stored
 * relative to DPLL id 0, hence the shift by "id * 6" to move it into
 * this PLL's 6-bit slot. The posting read flushes the write.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1211 
/*
 * Enable a SKL DPLL (1-3): program CTRL1 and the per-PLL config
 * registers, set the enable bit and wait for lock. A lock timeout is
 * logged but not otherwise propagated.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* 5 ms timeout for PLL lock */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1232 
/*
 * DPLL0 is always running (it drives CDCLK, see
 * skl_ddi_dpll0_get_hw_state()), so "enabling" it only means updating
 * its link-rate fields in DPLL_CTRL1.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1238 
/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1250 
/* DPLL0 must stay on (it drives CDCLK), so disable is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1255 
/*
 * Read back the hardware state of a SKL DPLL (1-3) into @hw_state.
 * Returns false if the display power domain is off or the PLL is
 * disabled. The cfgcr registers are only captured in HDMI mode since
 * they are meaningless otherwise.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's 6-bit slice of the shared CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1292 
/*
 * Read back DPLL0's CTRL1 state. Unlike the other DPLLs, DPLL0 has no
 * cfgcr registers (no HDMI mode) and is expected to always be enabled,
 * so a disabled PLL here triggers a WARN.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* Extract this PLL's 6-bit slice of the shared CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1325 
/* Best divider candidate found so far by the WRPLL search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1332 
1333 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1334 {
1335 	memset(ctx, 0, sizeof(*ctx));
1336 
1337 	ctx->min_deviation = U64_MAX;
1338 }
1339 
1340 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1341 #define SKL_DCO_MAX_PDEVIATION	100
1342 #define SKL_DCO_MAX_NDEVIATION	600
1343 
1344 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1345 				  u64 central_freq,
1346 				  u64 dco_freq,
1347 				  unsigned int divider)
1348 {
1349 	u64 deviation;
1350 
1351 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1352 			      central_freq);
1353 
1354 	/* positive deviation */
1355 	if (dco_freq >= central_freq) {
1356 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1357 		    deviation < ctx->min_deviation) {
1358 			ctx->min_deviation = deviation;
1359 			ctx->central_freq = central_freq;
1360 			ctx->dco_freq = dco_freq;
1361 			ctx->p = divider;
1362 		}
1363 	/* negative deviation */
1364 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1365 		   deviation < ctx->min_deviation) {
1366 		ctx->min_deviation = deviation;
1367 		ctx->central_freq = central_freq;
1368 		ctx->dco_freq = dco_freq;
1369 		ctx->p = divider;
1370 	}
1371 }
1372 
/*
 * Decompose an overall divider p into the hardware's P0 (PDiv),
 * P1 (QDiv ratio) and P2 (KDiv) multipliers such that p = p0 * p1 * p2.
 * The outputs are only written for dividers the hardware supports;
 * unsupported values leave them untouched (the caller pre-initializes
 * them to 0 and checks for that).
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* Even dividers: factor the halved value into P0/P1/P2. */
		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* Odd dividers: only 3, 5, 7, 9, 15, 21 and 35 are supported. */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1421 
/* Register-ready WRPLL settings computed from the chosen dividers. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* fractional part of the DCO ratio */
	u32 dco_integer;	/* integer part of the DCO ratio */
	u32 qdiv_ratio;		/* P1 */
	u32 qdiv_mode;		/* 1 when qdiv_ratio != 1 */
	u32 kdiv;		/* encoded P2 */
	u32 pdiv;		/* encoded P0 */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1431 
1432 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1433 				      u64 afe_clock,
1434 				      int ref_clock,
1435 				      u64 central_freq,
1436 				      u32 p0, u32 p1, u32 p2)
1437 {
1438 	u64 dco_freq;
1439 
1440 	switch (central_freq) {
1441 	case 9600000000ULL:
1442 		params->central_freq = 0;
1443 		break;
1444 	case 9000000000ULL:
1445 		params->central_freq = 1;
1446 		break;
1447 	case 8400000000ULL:
1448 		params->central_freq = 3;
1449 	}
1450 
1451 	switch (p0) {
1452 	case 1:
1453 		params->pdiv = 0;
1454 		break;
1455 	case 2:
1456 		params->pdiv = 1;
1457 		break;
1458 	case 3:
1459 		params->pdiv = 2;
1460 		break;
1461 	case 7:
1462 		params->pdiv = 4;
1463 		break;
1464 	default:
1465 		WARN(1, "Incorrect PDiv\n");
1466 	}
1467 
1468 	switch (p2) {
1469 	case 5:
1470 		params->kdiv = 0;
1471 		break;
1472 	case 2:
1473 		params->kdiv = 1;
1474 		break;
1475 	case 3:
1476 		params->kdiv = 2;
1477 		break;
1478 	case 1:
1479 		params->kdiv = 3;
1480 		break;
1481 	default:
1482 		WARN(1, "Incorrect KDiv\n");
1483 	}
1484 
1485 	params->qdiv_ratio = p1;
1486 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1487 
1488 	dco_freq = p0 * p1 * p2 * afe_clock;
1489 
1490 	/*
1491 	 * Intermediate values are in Hz.
1492 	 * Divide by MHz to match bsepc
1493 	 */
1494 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1495 	params->dco_fraction =
1496 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1497 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1498 }
1499 
/*
 * Search for WRPLL dividers producing a DCO frequency within spec of
 * one of the three DCO central frequencies for the given pixel clock.
 * Even dividers are preferred over odd ones; the search stops early on
 * an exact (zero-deviation) match. Returns false if no valid divider
 * exists for this clock.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		/* index 0 = even dividers: searched first and preferred */
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1573 
/*
 * Compute the SKL DPLL state (ctrl1/cfgcr1/cfgcr2) needed to drive an
 * HDMI output at crtc_state->port_clock (kHz). Returns false if no
 * valid WRPLL dividers exist for that clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the divider search wants Hz. */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1611 
/*
 * Compute the port clock (in kHz) from the WRPLL register state:
 * decode P0/P1/P2 from cfgcr2, rebuild the DCO frequency from cfgcr1
 * and divide by the overall divider and the 5x AFE factor.
 * Returns 0 on an undecodable register value.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* QDiv only applies when qdiv_mode is set; otherwise P1 is 1. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO freq = (integer + fraction/0x8000) * ref clock */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1682 
/*
 * Compute the SKL DPLL ctrl1 state for a DP/eDP link at
 * crtc_state->port_clock. Always returns true.
 *
 * NOTE(review): the switch has no default case — an unexpected
 * port_clock leaves ctrl1 with only the override bit set; presumably
 * the link rates were already validated by the caller.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1722 
1723 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1724 				  const struct intel_shared_dpll *pll,
1725 				  const struct intel_dpll_hw_state *pll_state)
1726 {
1727 	int link_clock = 0;
1728 
1729 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1730 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1731 	case DPLL_CTRL1_LINK_RATE_810:
1732 		link_clock = 81000;
1733 		break;
1734 	case DPLL_CTRL1_LINK_RATE_1080:
1735 		link_clock = 108000;
1736 		break;
1737 	case DPLL_CTRL1_LINK_RATE_1350:
1738 		link_clock = 135000;
1739 		break;
1740 	case DPLL_CTRL1_LINK_RATE_1620:
1741 		link_clock = 162000;
1742 		break;
1743 	case DPLL_CTRL1_LINK_RATE_2160:
1744 		link_clock = 216000;
1745 		break;
1746 	case DPLL_CTRL1_LINK_RATE_2700:
1747 		link_clock = 270000;
1748 		break;
1749 	default:
1750 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1751 		break;
1752 	}
1753 
1754 	return link_clock * 2;
1755 }
1756 
1757 static bool skl_get_dpll(struct intel_atomic_state *state,
1758 			 struct intel_crtc *crtc,
1759 			 struct intel_encoder *encoder)
1760 {
1761 	struct intel_crtc_state *crtc_state =
1762 		intel_atomic_get_new_crtc_state(state, crtc);
1763 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1764 	struct intel_shared_dpll *pll;
1765 	bool bret;
1766 
1767 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1768 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1769 		if (!bret) {
1770 			drm_dbg_kms(&i915->drm,
1771 				    "Could not get HDMI pll dividers.\n");
1772 			return false;
1773 		}
1774 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1775 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1776 		if (!bret) {
1777 			drm_dbg_kms(&i915->drm,
1778 				    "Could not set DP dpll HW state.\n");
1779 			return false;
1780 		}
1781 	} else {
1782 		return false;
1783 	}
1784 
1785 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1786 		pll = intel_find_shared_dpll(state, crtc,
1787 					     &crtc_state->dpll_hw_state,
1788 					     BIT(DPLL_ID_SKL_DPLL0));
1789 	else
1790 		pll = intel_find_shared_dpll(state, crtc,
1791 					     &crtc_state->dpll_hw_state,
1792 					     BIT(DPLL_ID_SKL_DPLL3) |
1793 					     BIT(DPLL_ID_SKL_DPLL2) |
1794 					     BIT(DPLL_ID_SKL_DPLL1));
1795 	if (!pll)
1796 		return false;
1797 
1798 	intel_reference_shared_dpll(state, crtc,
1799 				    pll, &crtc_state->dpll_hw_state);
1800 
1801 	crtc_state->shared_dpll = pll;
1802 
1803 	return true;
1804 }
1805 
1806 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1807 				const struct intel_shared_dpll *pll,
1808 				const struct intel_dpll_hw_state *pll_state)
1809 {
1810 	/*
1811 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1812 	 * the internal shift for each field
1813 	 */
1814 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1815 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1816 	else
1817 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1818 }
1819 
/* SKL PLLs use the CDCLK reference; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1825 
/* Log the SKL-relevant PLL hw state (ctrl1/cfgcr1/cfgcr2) for debug. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1835 
/* Hooks for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1842 
/* Hooks for SKL DPLL0, which drives CDCLK and is never fully disabled. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1849 
/* All shared PLLs available on SKL; DPLL0 is always on (drives CDCLK). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1857 
/* SKL shared-PLL manager entry points. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1865 
/*
 * Enable a BXT/GLK port PLL: select the reference, (on GLK) power up
 * the PLL, program all divider/loop-filter registers from the saved hw
 * state, trigger a recalibration, set the enable bit, wait for lock,
 * and finally program the lane stagger settings. The register write
 * order follows the hardware programming sequence and must not be
 * rearranged.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK requires an explicit PLL power-up before programming. */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Second write applies the saved ebb4 state (10-bit clock select). */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1986 
/*
 * Disable a BXT/GLK port PLL; on GLK also power the PLL back down and
 * wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2009 
/*
 * Read back the hardware state of a BXT/GLK port PLL into @hw_state,
 * masking each register down to the fields the driver programs.
 * Returns false if the display power domain is off or the PLL is
 * disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2089 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* port clock in kHz this divider set is for */
	u32 p1;			/* post divider 1 */
	u32 p2;			/* post divider 2 */
	u32 m2_int;		/* feedback divider M2, integer part */
	u32 m2_frac;		/* feedback divider M2, 22-bit fractional part */
	bool m2_frac_en;	/* whether the fractional part is used */
	u32 n;			/* reference pre-divider */

	int vco;		/* resulting VCO frequency in kHz */
};
2102 
/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	/* clock  p1  p2  m2_int  m2_frac  m2_frac_en  n  (vco computed at use) */
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2113 
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
{
	/*
	 * Compute PLL dividers for an HDMI pixel clock by running the
	 * generic BXT divider search and splitting the result into the
	 * fields of @clk_div.
	 *
	 * Returns false if no divider combination yields the requested
	 * port clock.
	 */
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
			crtc_state->port_clock,
			pipe_name(crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	/* BXT hardware fixes M1 at 2; the search must agree. */
	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* best_clock.m2 carries a 22-bit fixed-point fraction; split it. */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}
2146 
2147 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2148 				    struct bxt_clk_div *clk_div)
2149 {
2150 	int clock = crtc_state->port_clock;
2151 	int i;
2152 
2153 	*clk_div = bxt_dp_clk_val[0];
2154 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2155 		if (bxt_dp_clk_val[i].clock == clock) {
2156 			*clk_div = bxt_dp_clk_val[i];
2157 			break;
2158 		}
2159 	}
2160 
2161 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2162 }
2163 
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	/*
	 * Translate the abstract divider values in @clk_div into the PORT
	 * PLL register words stored in crtc_state->dpll_hw_state.
	 *
	 * Returns false if the VCO frequency is outside the supported
	 * bands (no loop filter coefficients defined for it).
	 */
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/*
	 * Select loop filter coefficients and target count per VCO band.
	 * Note the middle branch deliberately excludes exactly 5400000 kHz,
	 * which gets its own coefficient set in the branch after it.
	 */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger value scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* Pack dividers and coefficients into the register image. */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2233 
2234 static bool
2235 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2236 {
2237 	struct bxt_clk_div clk_div = {};
2238 
2239 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2240 
2241 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2242 }
2243 
2244 static bool
2245 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2246 {
2247 	struct bxt_clk_div clk_div = {};
2248 
2249 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2250 
2251 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2252 }
2253 
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * Reverse of bxt_ddi_set_dpll_hw_state(): decode the divider
	 * fields from @pll_state and compute the resulting port clock
	 * (in kHz) from the non-SSC reference.
	 */
	struct dpll clock;

	clock.m1 = 2;	/* fixed on this platform */
	/* Reassemble M2 as 22-bit fixed point: integer part << 22 | fraction */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	/* BXT PLLs use the CHV divider layout, so reuse its math. */
	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2270 
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	/*
	 * Compute the PLL state for @crtc and reserve its (fixed,
	 * per-port) PLL in the atomic state.
	 *
	 * Returns false if no valid PLL configuration exists for the
	 * requested mode.
	 */
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	/* Track this CRTC as a user of the PLL in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2303 
2304 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2305 {
2306 	i915->dpll.ref_clks.ssc = 100000;
2307 	i915->dpll.ref_clks.nssc = 100000;
2308 	/* DSI non-SSC ref 19.2MHz */
2309 }
2310 
2311 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2312 			      const struct intel_dpll_hw_state *hw_state)
2313 {
2314 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2315 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2316 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2317 		    hw_state->ebb0,
2318 		    hw_state->ebb4,
2319 		    hw_state->pll0,
2320 		    hw_state->pll1,
2321 		    hw_state->pll2,
2322 		    hw_state->pll3,
2323 		    hw_state->pll6,
2324 		    hw_state->pll8,
2325 		    hw_state->pll9,
2326 		    hw_state->pll10,
2327 		    hw_state->pcsdw12);
2328 }
2329 
/* Hardware enable/disable/readback hooks shared by all BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};

/* One PLL per port; the DPLL ids double as port numbers (see bxt_get_dpll). */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

/* Top-level PLL manager for BXT/GLK platforms. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2351 
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	/*
	 * Decompose the overall WRPLL divider into the P/Q/K factors the
	 * hardware implements, such that bestdiv == pdiv * qdiv * kdiv.
	 * Only the divider values produced by icl_calc_wrpll() are handled.
	 */
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2390 
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	/*
	 * Encode the logical divider/DCO values into the register field
	 * encodings expected by the DPLL CFGCR registers.
	 */
	u32 dco;

	/* K divider: register encoding is not the raw ratio (3 -> 4). */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* P divider: one-hot style register encoding. */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Q divider is only meaningful when K == 2 (see icl_wrpll_get_multipliers). */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio as 15-bit fixed point: integer part and fraction. */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2438 
2439 /*
2440  * Display WA #22010492432: ehl, tgl, adl-p
2441  * Program half of the nominal DCO divider fraction value.
2442  */
2443 static bool
2444 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2445 {
2446 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2447 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2448 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
2449 		 i915->dpll.ref_clks.nssc == 38400;
2450 }
2451 
/* Maps a DP link rate to its pre-computed combo PHY WRPLL settings. */
struct icl_combo_pll_params {
	int clock;			/* port clock in kHz */
	struct skl_wrpll_params wrpll;	/* register-ready WRPLL values */
};
2456 
2457 /*
2458  * These values alrea already adjusted: they're the bits we write to the
2459  * registers, not the logical values.
2460  */
/* Combo PHY DP PLL values for a 24 MHz reference; clocks in kHz. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2487 
2488 
/*
 * Also used for 38.4 MHz values (the DPLL halves a 38.4 MHz reference,
 * see icl_wrpll_ref_clock()). Must stay in lockstep with the 24 MHz
 * table above: same link rates, same number of entries.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2516 
/* Fixed Thunderbolt PLL parameters, ICL, 24 MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Fixed Thunderbolt PLL parameters, ICL, 19.2 (and 38.4) MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Fixed Thunderbolt PLL parameters, TGL+, 19.2 (and 38.4) MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Fixed Thunderbolt PLL parameters, TGL+, 24 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2537 
2538 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2539 				  struct skl_wrpll_params *pll_params)
2540 {
2541 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2542 	const struct icl_combo_pll_params *params =
2543 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2544 		icl_dp_combo_pll_24MHz_values :
2545 		icl_dp_combo_pll_19_2MHz_values;
2546 	int clock = crtc_state->port_clock;
2547 	int i;
2548 
2549 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2550 		if (clock == params[i].clock) {
2551 			*pll_params = params[i].wrpll;
2552 			return true;
2553 		}
2554 	}
2555 
2556 	MISSING_CASE(clock);
2557 	return false;
2558 }
2559 
2560 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2561 			     struct skl_wrpll_params *pll_params)
2562 {
2563 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2564 
2565 	if (DISPLAY_VER(dev_priv) >= 12) {
2566 		switch (dev_priv->dpll.ref_clks.nssc) {
2567 		default:
2568 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2569 			fallthrough;
2570 		case 19200:
2571 		case 38400:
2572 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2573 			break;
2574 		case 24000:
2575 			*pll_params = tgl_tbt_pll_24MHz_values;
2576 			break;
2577 		}
2578 	} else {
2579 		switch (dev_priv->dpll.ref_clks.nssc) {
2580 		default:
2581 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2582 			fallthrough;
2583 		case 19200:
2584 		case 38400:
2585 			*pll_params = icl_tbt_pll_19_2MHz_values;
2586 			break;
2587 		case 24000:
2588 			*pll_params = icl_tbt_pll_24MHz_values;
2589 			break;
2590 		}
2591 	}
2592 
2593 	return true;
2594 }
2595 
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);	/* must never be asked for a single rate */

	return 0;
}
2608 
2609 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2610 {
2611 	int ref_clock = i915->dpll.ref_clks.nssc;
2612 
2613 	/*
2614 	 * For ICL+, the spec states: if reference frequency is 38.4,
2615 	 * use 19.2 because the DPLL automatically divides that by 2.
2616 	 */
2617 	if (ref_clock == 38400)
2618 		ref_clock = 19200;
2619 
2620 	return ref_clock;
2621 }
2622 
static bool
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	/*
	 * Find WRPLL settings for the port clock: choose the overall
	 * divider whose resulting DCO frequency lands closest to the
	 * middle of the valid DCO range, then split it into P/Q/K and
	 * encode everything into @wrpll_params.
	 *
	 * Returns false if no divider puts the DCO in range.
	 */
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int ref_clock = icl_wrpll_ref_clock(i915);
	/* AFE clock is 5x the port clock (DP symbol rate relationship). */
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	/* Candidate overall dividers; even values first, then odd. */
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		/* Keep the in-range DCO closest to the middle of the band. */
		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return false;

	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return true;
}
2666 
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * Decode the combo PHY DPLL CFGCR0/CFGCR1 register state back
	 * into a port clock in kHz (inverse of icl_calc_dpll_state()).
	 * Returns 0 (after warning) on an undecodable divider.
	 */
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only contributes when qdiv_mode is enabled. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/*
	 * Translate register encodings back to ratios. An encoding not
	 * listed here leaves p0/p2 as the raw masked value; 0 is caught
	 * by the drm_WARN_ON() below.
	 */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the halved fraction programmed for WA #22010492432. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* Fraction is 15-bit fixed point (1/0x8000 steps). */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* Port clock = DCO / (P * Q * K * 5). */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2727 
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	/*
	 * Pack the computed WRPLL parameters into the CFGCR0/CFGCR1
	 * (and, with AFC override, DIV0) register words in @pll_state.
	 */
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	/* WA #22010492432: program half the nominal DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* Central frequency / clock select field differs between gens. */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* Optional VBT-provided AFC startup override. */
	if (i915->vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
}
2755 
2756 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2757 				     u32 *target_dco_khz,
2758 				     struct intel_dpll_hw_state *state,
2759 				     bool is_dkl)
2760 {
2761 	u32 dco_min_freq, dco_max_freq;
2762 	int div1_vals[] = {7, 5, 3, 2};
2763 	unsigned int i;
2764 	int div2;
2765 
2766 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2767 	dco_max_freq = is_dp ? 8100000 : 10000000;
2768 
2769 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2770 		int div1 = div1_vals[i];
2771 
2772 		for (div2 = 10; div2 > 0; div2--) {
2773 			int dco = div1 * div2 * clock_khz * 5;
2774 			int a_divratio, tlinedrv, inputsel;
2775 			u32 hsdiv;
2776 
2777 			if (dco < dco_min_freq || dco > dco_max_freq)
2778 				continue;
2779 
2780 			if (div2 >= 2) {
2781 				/*
2782 				 * Note: a_divratio not matching TGL BSpec
2783 				 * algorithm but matching hardcoded values and
2784 				 * working on HW for DP alt-mode at least
2785 				 */
2786 				a_divratio = is_dp ? 10 : 5;
2787 				tlinedrv = is_dkl ? 1 : 2;
2788 			} else {
2789 				a_divratio = 5;
2790 				tlinedrv = 0;
2791 			}
2792 			inputsel = is_dp ? 0 : 1;
2793 
2794 			switch (div1) {
2795 			default:
2796 				MISSING_CASE(div1);
2797 				fallthrough;
2798 			case 2:
2799 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2800 				break;
2801 			case 3:
2802 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2803 				break;
2804 			case 5:
2805 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2806 				break;
2807 			case 7:
2808 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2809 				break;
2810 			}
2811 
2812 			*target_dco_khz = dco;
2813 
2814 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2815 
2816 			state->mg_clktop2_coreclkctl1 =
2817 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2818 
2819 			state->mg_clktop2_hsclkctl =
2820 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2821 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2822 				hsdiv |
2823 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2824 
2825 			return true;
2826 		}
2827 	}
2828 
2829 	return false;
2830 }
2831 
2832 /*
2833  * The specification for this function uses real numbers, so the math had to be
2834  * adapted to integer-only calculation, that's why it looks so different.
2835  */
2836 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2837 				  struct intel_dpll_hw_state *pll_state)
2838 {
2839 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2840 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
2841 	int clock = crtc_state->port_clock;
2842 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2843 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2844 	u32 prop_coeff, int_coeff;
2845 	u32 tdc_targetcnt, feedfwgain;
2846 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2847 	u64 tmp;
2848 	bool use_ssc = false;
2849 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2850 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2851 
2852 	memset(pll_state, 0, sizeof(*pll_state));
2853 
2854 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2855 				      pll_state, is_dkl)) {
2856 		drm_dbg_kms(&dev_priv->drm,
2857 			    "Failed to find divisors for clock %d\n", clock);
2858 		return false;
2859 	}
2860 
2861 	m1div = 2;
2862 	m2div_int = dco_khz / (refclk_khz * m1div);
2863 	if (m2div_int > 255) {
2864 		if (!is_dkl) {
2865 			m1div = 4;
2866 			m2div_int = dco_khz / (refclk_khz * m1div);
2867 		}
2868 
2869 		if (m2div_int > 255) {
2870 			drm_dbg_kms(&dev_priv->drm,
2871 				    "Failed to find mdiv for clock %d\n",
2872 				    clock);
2873 			return false;
2874 		}
2875 	}
2876 	m2div_rem = dco_khz % (refclk_khz * m1div);
2877 
2878 	tmp = (u64)m2div_rem * (1 << 22);
2879 	do_div(tmp, refclk_khz * m1div);
2880 	m2div_frac = tmp;
2881 
2882 	switch (refclk_khz) {
2883 	case 19200:
2884 		iref_ndiv = 1;
2885 		iref_trim = 28;
2886 		iref_pulse_w = 1;
2887 		break;
2888 	case 24000:
2889 		iref_ndiv = 1;
2890 		iref_trim = 25;
2891 		iref_pulse_w = 2;
2892 		break;
2893 	case 38400:
2894 		iref_ndiv = 2;
2895 		iref_trim = 28;
2896 		iref_pulse_w = 1;
2897 		break;
2898 	default:
2899 		MISSING_CASE(refclk_khz);
2900 		return false;
2901 	}
2902 
2903 	/*
2904 	 * tdc_res = 0.000003
2905 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2906 	 *
2907 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2908 	 * was supposed to be a division, but we rearranged the operations of
2909 	 * the formula to avoid early divisions so we don't multiply the
2910 	 * rounding errors.
2911 	 *
2912 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2913 	 * we also rearrange to work with integers.
2914 	 *
2915 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2916 	 * last division by 10.
2917 	 */
2918 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2919 
2920 	/*
2921 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2922 	 * 32 bits. That's not a problem since we round the division down
2923 	 * anyway.
2924 	 */
2925 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2926 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2927 
2928 	if (dco_khz >= 9000000) {
2929 		prop_coeff = 5;
2930 		int_coeff = 10;
2931 	} else {
2932 		prop_coeff = 4;
2933 		int_coeff = 8;
2934 	}
2935 
2936 	if (use_ssc) {
2937 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2938 		do_div(tmp, refclk_khz * m1div * 10000);
2939 		ssc_stepsize = tmp;
2940 
2941 		tmp = mul_u32_u32(dco_khz, 1000);
2942 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2943 	} else {
2944 		ssc_stepsize = 0;
2945 		ssc_steplen = 0;
2946 	}
2947 	ssc_steplog = 4;
2948 
2949 	/* write pll_state calculations */
2950 	if (is_dkl) {
2951 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2952 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2953 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2954 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2955 		if (dev_priv->vbt.override_afc_startup) {
2956 			u8 val = dev_priv->vbt.override_afc_startup_val;
2957 
2958 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2959 		}
2960 
2961 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2962 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2963 
2964 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2965 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2966 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2967 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2968 
2969 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2970 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2971 
2972 		pll_state->mg_pll_tdc_coldst_bias =
2973 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2974 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2975 
2976 	} else {
2977 		pll_state->mg_pll_div0 =
2978 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2979 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2980 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2981 
2982 		pll_state->mg_pll_div1 =
2983 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2984 			MG_PLL_DIV1_DITHER_DIV_2 |
2985 			MG_PLL_DIV1_NDIVRATIO(1) |
2986 			MG_PLL_DIV1_FBPREDIV(m1div);
2987 
2988 		pll_state->mg_pll_lf =
2989 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2990 			MG_PLL_LF_AFCCNTSEL_512 |
2991 			MG_PLL_LF_GAINCTRL(1) |
2992 			MG_PLL_LF_INT_COEFF(int_coeff) |
2993 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2994 
2995 		pll_state->mg_pll_frac_lock =
2996 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2997 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2998 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2999 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3000 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3001 		if (use_ssc || m2div_rem > 0)
3002 			pll_state->mg_pll_frac_lock |=
3003 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3004 
3005 		pll_state->mg_pll_ssc =
3006 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3007 			MG_PLL_SSC_TYPE(2) |
3008 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3009 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3010 			MG_PLL_SSC_FLLEN |
3011 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3012 
3013 		pll_state->mg_pll_tdc_coldst_bias =
3014 			MG_PLL_TDC_COLDST_COLDSTART |
3015 			MG_PLL_TDC_COLDST_IREFINT_EN |
3016 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3017 			MG_PLL_TDC_TDCOVCCORR_EN |
3018 			MG_PLL_TDC_TDCSEL(3);
3019 
3020 		pll_state->mg_pll_bias =
3021 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3022 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3023 			MG_PLL_BIAS_BIAS_BONUS(10) |
3024 			MG_PLL_BIAS_BIASCAL_EN |
3025 			MG_PLL_BIAS_CTRIM(12) |
3026 			MG_PLL_BIAS_VREF_RDAC(4) |
3027 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3028 
3029 		if (refclk_khz == 38400) {
3030 			pll_state->mg_pll_tdc_coldst_bias_mask =
3031 				MG_PLL_TDC_COLDST_COLDSTART;
3032 			pll_state->mg_pll_bias_mask = 0;
3033 		} else {
3034 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3035 			pll_state->mg_pll_bias_mask = -1U;
3036 		}
3037 
3038 		pll_state->mg_pll_tdc_coldst_bias &=
3039 			pll_state->mg_pll_tdc_coldst_bias_mask;
3040 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3041 	}
3042 
3043 	return true;
3044 }
3045 
3046 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3047 				   const struct intel_shared_dpll *pll,
3048 				   const struct intel_dpll_hw_state *pll_state)
3049 {
3050 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3051 	u64 tmp;
3052 
3053 	ref_clock = dev_priv->dpll.ref_clks.nssc;
3054 
3055 	if (DISPLAY_VER(dev_priv) >= 12) {
3056 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3057 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3058 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3059 
3060 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3061 			m2_frac = pll_state->mg_pll_bias &
3062 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3063 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3064 		} else {
3065 			m2_frac = 0;
3066 		}
3067 	} else {
3068 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3069 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3070 
3071 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3072 			m2_frac = pll_state->mg_pll_div0 &
3073 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3074 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3075 		} else {
3076 			m2_frac = 0;
3077 		}
3078 	}
3079 
3080 	switch (pll_state->mg_clktop2_hsclkctl &
3081 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3082 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3083 		div1 = 2;
3084 		break;
3085 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3086 		div1 = 3;
3087 		break;
3088 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3089 		div1 = 5;
3090 		break;
3091 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3092 		div1 = 7;
3093 		break;
3094 	default:
3095 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3096 		return 0;
3097 	}
3098 
3099 	div2 = (pll_state->mg_clktop2_hsclkctl &
3100 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3101 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3102 
3103 	/* div2 value of 0 is same as 1 means no div */
3104 	if (div2 == 0)
3105 		div2 = 1;
3106 
3107 	/*
3108 	 * Adjust the original formula to delay the division by 2^22 in order to
3109 	 * minimize possible rounding errors.
3110 	 */
3111 	tmp = (u64)m1 * m2_int * ref_clock +
3112 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3113 	tmp = div_u64(tmp, 5 * div1 * div2);
3114 
3115 	return tmp;
3116 }
3117 
3118 /**
3119  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3120  * @crtc_state: state for the CRTC to select the DPLL for
3121  * @port_dpll_id: the active @port_dpll_id to select
3122  *
3123  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3124  * CRTC.
3125  */
3126 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3127 			      enum icl_port_dpll_id port_dpll_id)
3128 {
3129 	struct icl_port_dpll *port_dpll =
3130 		&crtc_state->icl_port_dplls[port_dpll_id];
3131 
3132 	crtc_state->shared_dpll = port_dpll->pll;
3133 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3134 }
3135 
3136 static void icl_update_active_dpll(struct intel_atomic_state *state,
3137 				   struct intel_crtc *crtc,
3138 				   struct intel_encoder *encoder)
3139 {
3140 	struct intel_crtc_state *crtc_state =
3141 		intel_atomic_get_new_crtc_state(state, crtc);
3142 	struct intel_digital_port *primary_port;
3143 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3144 
3145 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3146 		enc_to_mst(encoder)->primary :
3147 		enc_to_dig_port(encoder);
3148 
3149 	if (primary_port &&
3150 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3151 	     intel_tc_port_in_legacy_mode(primary_port)))
3152 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3153 
3154 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3155 }
3156 
3157 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3158 {
3159 	if (!(i915->hti_state & HDPORT_ENABLED))
3160 		return 0;
3161 
3162 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3163 }
3164 
3165 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3166 				   struct intel_crtc *crtc,
3167 				   struct intel_encoder *encoder)
3168 {
3169 	struct intel_crtc_state *crtc_state =
3170 		intel_atomic_get_new_crtc_state(state, crtc);
3171 	struct skl_wrpll_params pll_params = { };
3172 	struct icl_port_dpll *port_dpll =
3173 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3174 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3175 	enum port port = encoder->port;
3176 	unsigned long dpll_mask;
3177 	int ret;
3178 
3179 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3180 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3181 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3182 	else
3183 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3184 
3185 	if (!ret) {
3186 		drm_dbg_kms(&dev_priv->drm,
3187 			    "Could not calculate combo PHY PLL state.\n");
3188 
3189 		return false;
3190 	}
3191 
3192 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3193 
3194 	if (IS_ALDERLAKE_S(dev_priv)) {
3195 		dpll_mask =
3196 			BIT(DPLL_ID_DG1_DPLL3) |
3197 			BIT(DPLL_ID_DG1_DPLL2) |
3198 			BIT(DPLL_ID_ICL_DPLL1) |
3199 			BIT(DPLL_ID_ICL_DPLL0);
3200 	} else if (IS_DG1(dev_priv)) {
3201 		if (port == PORT_D || port == PORT_E) {
3202 			dpll_mask =
3203 				BIT(DPLL_ID_DG1_DPLL2) |
3204 				BIT(DPLL_ID_DG1_DPLL3);
3205 		} else {
3206 			dpll_mask =
3207 				BIT(DPLL_ID_DG1_DPLL0) |
3208 				BIT(DPLL_ID_DG1_DPLL1);
3209 		}
3210 	} else if (IS_ROCKETLAKE(dev_priv)) {
3211 		dpll_mask =
3212 			BIT(DPLL_ID_EHL_DPLL4) |
3213 			BIT(DPLL_ID_ICL_DPLL1) |
3214 			BIT(DPLL_ID_ICL_DPLL0);
3215 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3216 		dpll_mask =
3217 			BIT(DPLL_ID_EHL_DPLL4) |
3218 			BIT(DPLL_ID_ICL_DPLL1) |
3219 			BIT(DPLL_ID_ICL_DPLL0);
3220 	} else {
3221 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3222 	}
3223 
3224 	/* Eliminate DPLLs from consideration if reserved by HTI */
3225 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3226 
3227 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3228 						&port_dpll->hw_state,
3229 						dpll_mask);
3230 	if (!port_dpll->pll) {
3231 		drm_dbg_kms(&dev_priv->drm,
3232 			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3233 			    encoder->base.base.id, encoder->base.name);
3234 		return false;
3235 	}
3236 
3237 	intel_reference_shared_dpll(state, crtc,
3238 				    port_dpll->pll, &port_dpll->hw_state);
3239 
3240 	icl_update_active_dpll(state, crtc, encoder);
3241 
3242 	return true;
3243 }
3244 
3245 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3246 				 struct intel_crtc *crtc,
3247 				 struct intel_encoder *encoder)
3248 {
3249 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3250 	struct intel_crtc_state *crtc_state =
3251 		intel_atomic_get_new_crtc_state(state, crtc);
3252 	struct skl_wrpll_params pll_params = { };
3253 	struct icl_port_dpll *port_dpll;
3254 	enum intel_dpll_id dpll_id;
3255 
3256 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3257 	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
3258 		drm_dbg_kms(&dev_priv->drm,
3259 			    "Could not calculate TBT PLL state.\n");
3260 		return false;
3261 	}
3262 
3263 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3264 
3265 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3266 						&port_dpll->hw_state,
3267 						BIT(DPLL_ID_ICL_TBTPLL));
3268 	if (!port_dpll->pll) {
3269 		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
3270 		return false;
3271 	}
3272 	intel_reference_shared_dpll(state, crtc,
3273 				    port_dpll->pll, &port_dpll->hw_state);
3274 
3275 
3276 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3277 	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
3278 		drm_dbg_kms(&dev_priv->drm,
3279 			    "Could not calculate MG PHY PLL state.\n");
3280 		goto err_unreference_tbt_pll;
3281 	}
3282 
3283 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3284 							 encoder->port));
3285 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3286 						&port_dpll->hw_state,
3287 						BIT(dpll_id));
3288 	if (!port_dpll->pll) {
3289 		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
3290 		goto err_unreference_tbt_pll;
3291 	}
3292 	intel_reference_shared_dpll(state, crtc,
3293 				    port_dpll->pll, &port_dpll->hw_state);
3294 
3295 	icl_update_active_dpll(state, crtc, encoder);
3296 
3297 	return true;
3298 
3299 err_unreference_tbt_pll:
3300 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3301 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3302 
3303 	return false;
3304 }
3305 
3306 static bool icl_get_dplls(struct intel_atomic_state *state,
3307 			  struct intel_crtc *crtc,
3308 			  struct intel_encoder *encoder)
3309 {
3310 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3311 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3312 
3313 	if (intel_phy_is_combo(dev_priv, phy))
3314 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3315 	else if (intel_phy_is_tc(dev_priv, phy))
3316 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3317 
3318 	MISSING_CASE(phy);
3319 
3320 	return false;
3321 }
3322 
3323 static void icl_put_dplls(struct intel_atomic_state *state,
3324 			  struct intel_crtc *crtc)
3325 {
3326 	const struct intel_crtc_state *old_crtc_state =
3327 		intel_atomic_get_old_crtc_state(state, crtc);
3328 	struct intel_crtc_state *new_crtc_state =
3329 		intel_atomic_get_new_crtc_state(state, crtc);
3330 	enum icl_port_dpll_id id;
3331 
3332 	new_crtc_state->shared_dpll = NULL;
3333 
3334 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3335 		const struct icl_port_dpll *old_port_dpll =
3336 			&old_crtc_state->icl_port_dplls[id];
3337 		struct icl_port_dpll *new_port_dpll =
3338 			&new_crtc_state->icl_port_dplls[id];
3339 
3340 		new_port_dpll->pll = NULL;
3341 
3342 		if (!old_port_dpll->pll)
3343 			continue;
3344 
3345 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3346 	}
3347 }
3348 
/*
 * Read out the current HW state of an ICL MG PHY (Type-C) PLL.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise.
 * Reserved register fields are masked out of the readout; only the
 * driver-owned fields are kept.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* Bail out (PLL treated as disabled) if display power is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The BIAS/TDC_COLDST masks depend on the reference clock; this must
	 * mirror the mask selection done in icl_calc_mg_pll_state() so that
	 * the computed and read-out states can be compared.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3414 
/*
 * Read out the current HW state of a Dekel PHY (TGL+ Type-C) PLL.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise.
 * Reserved register fields are masked out of the readout. The HIP index
 * register must be programmed before the reads, so the ordering here is
 * significant.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Bail out (PLL treated as disabled) if display power is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	/* The AFC startup field is only driver-owned if the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3488 
/*
 * Read out the current HW state of an ICL+ combo/TBT PLL via @enable_reg.
 *
 * Returns true and fills @hw_state (CFGCR0/CFGCR1 and, where applicable,
 * DIV0) if the PLL is enabled, false otherwise. The CFGCR register layout
 * varies per platform, hence the branching below.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Bail out (PLL treated as disabled) if display power is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only read out when the VBT overrides AFC startup. */
		if (dev_priv->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3547 
3548 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3549 				   struct intel_shared_dpll *pll,
3550 				   struct intel_dpll_hw_state *hw_state)
3551 {
3552 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3553 
3554 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3555 }
3556 
3557 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3558 				 struct intel_shared_dpll *pll,
3559 				 struct intel_dpll_hw_state *hw_state)
3560 {
3561 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3562 }
3563 
/*
 * Program the CFGCR0/CFGCR1 (and, if the VBT overrides AFC startup, DIV0)
 * registers of an ICL+ combo/TBT PLL from the cached hw state. The CFGCR
 * register layout varies per platform, hence the branching below.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC override without a DIV0 register would be a driver bug. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
			     hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3604 
/*
 * Program the MG PHY (Type-C) PLL registers from the cached hw state.
 * Must be called with the PLL powered but not yet enabled; ends with a
 * posting read to flush the writes.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully driver-owned, so plain writes suffice. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC_COLDST masks are refclk-dependent, see the comment above. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3655 
/*
 * Program the Dekel PHY (TGL+ Type-C) PLL registers from the cached hw
 * state. The HIP index register must be set before any of the other
 * accesses, so the ordering here is significant. Ends with a posting read
 * to flush the writes.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only touched if the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
		     hw_state->mg_pll_div0);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3723 
3724 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3725 				 struct intel_shared_dpll *pll,
3726 				 i915_reg_t enable_reg)
3727 {
3728 	u32 val;
3729 
3730 	val = intel_de_read(dev_priv, enable_reg);
3731 	val |= PLL_POWER_ENABLE;
3732 	intel_de_write(dev_priv, enable_reg, val);
3733 
3734 	/*
3735 	 * The spec says we need to "wait" but it also says it should be
3736 	 * immediate.
3737 	 */
3738 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3739 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3740 			pll->info->id);
3741 }
3742 
3743 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3744 			   struct intel_shared_dpll *pll,
3745 			   i915_reg_t enable_reg)
3746 {
3747 	u32 val;
3748 
3749 	val = intel_de_read(dev_priv, enable_reg);
3750 	val |= PLL_ENABLE;
3751 	intel_de_write(dev_priv, enable_reg, val);
3752 
3753 	/* Timeout is actually 600us. */
3754 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3755 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3756 }
3757 
/*
 * Wa_16011069516: disable CMTG (DPT) clock gating on ADL-P display step
 * A0, applicable only while DPLL0 is being enabled. Note the deliberate
 * double read below — do not "deduplicate" it.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3782 
3783 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3784 			     struct intel_shared_dpll *pll)
3785 {
3786 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3787 
3788 	if (IS_JSL_EHL(dev_priv) &&
3789 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3790 
3791 		/*
3792 		 * We need to disable DC states when this DPLL is enabled.
3793 		 * This can be done by taking a reference on DPLL4 power
3794 		 * domain.
3795 		 */
3796 		pll->wakeref = intel_display_power_get(dev_priv,
3797 						       POWER_DOMAIN_DC_OFF);
3798 	}
3799 
3800 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3801 
3802 	icl_dpll_write(dev_priv, pll);
3803 
3804 	/*
3805 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3806 	 * paths should already be setting the appropriate voltage, hence we do
3807 	 * nothing here.
3808 	 */
3809 
3810 	icl_pll_enable(dev_priv, pll, enable_reg);
3811 
3812 	adlp_cmtg_clock_gating_wa(dev_priv, pll);
3813 
3814 	/* DVFS post sequence would be here. See the comment above. */
3815 }
3816 
3817 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3818 			   struct intel_shared_dpll *pll)
3819 {
3820 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3821 
3822 	icl_dpll_write(dev_priv, pll);
3823 
3824 	/*
3825 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3826 	 * paths should already be setting the appropriate voltage, hence we do
3827 	 * nothing here.
3828 	 */
3829 
3830 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3831 
3832 	/* DVFS post sequence would be here. See the comment above. */
3833 }
3834 
3835 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3836 			  struct intel_shared_dpll *pll)
3837 {
3838 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3839 
3840 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3841 
3842 	if (DISPLAY_VER(dev_priv) >= 12)
3843 		dkl_pll_write(dev_priv, pll);
3844 	else
3845 		icl_mg_pll_write(dev_priv, pll);
3846 
3847 	/*
3848 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3849 	 * paths should already be setting the appropriate voltage, hence we do
3850 	 * nothing here.
3851 	 */
3852 
3853 	icl_pll_enable(dev_priv, pll, enable_reg);
3854 
3855 	/* DVFS post sequence would be here. See the comment above. */
3856 }
3857 
3858 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3859 			    struct intel_shared_dpll *pll,
3860 			    i915_reg_t enable_reg)
3861 {
3862 	u32 val;
3863 
3864 	/* The first steps are done by intel_ddi_post_disable(). */
3865 
3866 	/*
3867 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3868 	 * paths should already be setting the appropriate voltage, hence we do
3869 	 * nothing here.
3870 	 */
3871 
3872 	val = intel_de_read(dev_priv, enable_reg);
3873 	val &= ~PLL_ENABLE;
3874 	intel_de_write(dev_priv, enable_reg, val);
3875 
3876 	/* Timeout is actually 1us. */
3877 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3878 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3879 
3880 	/* DVFS post sequence would be here. See the comment above. */
3881 
3882 	val = intel_de_read(dev_priv, enable_reg);
3883 	val &= ~PLL_POWER_ENABLE;
3884 	intel_de_write(dev_priv, enable_reg, val);
3885 
3886 	/*
3887 	 * The spec says we need to "wait" but it also says it should be
3888 	 * immediate.
3889 	 */
3890 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3891 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
3892 			pll->info->id);
3893 }
3894 
3895 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3896 			      struct intel_shared_dpll *pll)
3897 {
3898 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3899 
3900 	icl_pll_disable(dev_priv, pll, enable_reg);
3901 
3902 	if (IS_JSL_EHL(dev_priv) &&
3903 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3904 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3905 					pll->wakeref);
3906 }
3907 
3908 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3909 			    struct intel_shared_dpll *pll)
3910 {
3911 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3912 }
3913 
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3921 
/*
 * Cache the DPLL reference clock: on ICL+ the non-SSC DPLL reference is
 * the CDCLK reference frequency.
 */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3927 
3928 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3929 			      const struct intel_dpll_hw_state *hw_state)
3930 {
3931 	drm_dbg_kms(&dev_priv->drm,
3932 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3933 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3934 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3935 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3936 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3937 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3938 		    hw_state->cfgcr0, hw_state->cfgcr1,
3939 		    hw_state->div0,
3940 		    hw_state->mg_refclkin_ctl,
3941 		    hw_state->mg_clktop2_coreclkctl1,
3942 		    hw_state->mg_clktop2_hsclkctl,
3943 		    hw_state->mg_pll_div0,
3944 		    hw_state->mg_pll_div1,
3945 		    hw_state->mg_pll_lf,
3946 		    hw_state->mg_pll_frac_lock,
3947 		    hw_state->mg_pll_ssc,
3948 		    hw_state->mg_pll_bias,
3949 		    hw_state->mg_pll_tdc_coldst_bias);
3950 }
3951 
/* PLL vfuncs for the combo PHY DPLLs (used by the *_plls tables below). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* PLL vfuncs for the Type-C Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* PLL vfuncs for the ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3972 
/* ICL: two combo PLLs, the TBT PLL and four MG PHY (Type-C port) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

/* Shared DPLL manager for ICL. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3992 
/* JSL/EHL: combo PLLs only, adding DPLL4 to the ICL DPLL0/1 pair. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* Shared DPLL manager for JSL/EHL (no TC ports, no update_active_dpll). */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4007 
/*
 * Dekel PHY PLL vfuncs: share the MG enable/disable paths, but use the
 * DKL-specific hw state readout.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4014 
/* TGL: two combo PLLs, the TBT PLL and six Dekel PHY Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

/* Shared DPLL manager for TGL. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4036 
/* RKL: combo PLLs only, same set as JSL/EHL including DPLL4. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* Shared DPLL manager for RKL (no TC ports, no update_active_dpll). */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4051 
/* DG1: four combo PLLs with DG1-specific IDs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

/* Shared DPLL manager for DG1. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4067 
/* ADL-S: four combo PLLs; DPLL2/3 reuse the DG1 PLL IDs. */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

/* Shared DPLL manager for ADL-S. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4083 
/* ADL-P: two combo PLLs, the TBT PLL and four Dekel PHY Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

/* Shared DPLL manager for ADL-P. */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4103 
4104 /**
4105  * intel_shared_dpll_init - Initialize shared DPLLs
4106  * @dev: drm device
4107  *
4108  * Initialize shared DPLLs for @dev.
4109  */
4110 void intel_shared_dpll_init(struct drm_device *dev)
4111 {
4112 	struct drm_i915_private *dev_priv = to_i915(dev);
4113 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4114 	const struct dpll_info *dpll_info;
4115 	int i;
4116 
4117 	if (IS_DG2(dev_priv))
4118 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4119 		dpll_mgr = NULL;
4120 	else if (IS_ALDERLAKE_P(dev_priv))
4121 		dpll_mgr = &adlp_pll_mgr;
4122 	else if (IS_ALDERLAKE_S(dev_priv))
4123 		dpll_mgr = &adls_pll_mgr;
4124 	else if (IS_DG1(dev_priv))
4125 		dpll_mgr = &dg1_pll_mgr;
4126 	else if (IS_ROCKETLAKE(dev_priv))
4127 		dpll_mgr = &rkl_pll_mgr;
4128 	else if (DISPLAY_VER(dev_priv) >= 12)
4129 		dpll_mgr = &tgl_pll_mgr;
4130 	else if (IS_JSL_EHL(dev_priv))
4131 		dpll_mgr = &ehl_pll_mgr;
4132 	else if (DISPLAY_VER(dev_priv) >= 11)
4133 		dpll_mgr = &icl_pll_mgr;
4134 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4135 		dpll_mgr = &bxt_pll_mgr;
4136 	else if (DISPLAY_VER(dev_priv) == 9)
4137 		dpll_mgr = &skl_pll_mgr;
4138 	else if (HAS_DDI(dev_priv))
4139 		dpll_mgr = &hsw_pll_mgr;
4140 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4141 		dpll_mgr = &pch_pll_mgr;
4142 
4143 	if (!dpll_mgr) {
4144 		dev_priv->dpll.num_shared_dpll = 0;
4145 		return;
4146 	}
4147 
4148 	dpll_info = dpll_mgr->dpll_info;
4149 
4150 	for (i = 0; dpll_info[i].name; i++) {
4151 		drm_WARN_ON(dev, i != dpll_info[i].id);
4152 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4153 	}
4154 
4155 	dev_priv->dpll.mgr = dpll_mgr;
4156 	dev_priv->dpll.num_shared_dpll = i;
4157 	mutex_init(&dev_priv->dpll.lock);
4158 
4159 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4160 }
4161 
4162 /**
4163  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4164  * @state: atomic state
4165  * @crtc: CRTC to reserve DPLLs for
4166  * @encoder: encoder
4167  *
4168  * This function reserves all required DPLLs for the given CRTC and encoder
4169  * combination in the current atomic commit @state and the new @crtc atomic
4170  * state.
4171  *
4172  * The new configuration in the atomic commit @state is made effective by
4173  * calling intel_shared_dpll_swap_state().
4174  *
4175  * The reserved DPLLs should be released by calling
4176  * intel_release_shared_dplls().
4177  *
4178  * Returns:
4179  * True if all required DPLLs were successfully reserved.
4180  */
4181 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4182 				struct intel_crtc *crtc,
4183 				struct intel_encoder *encoder)
4184 {
4185 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4186 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4187 
4188 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4189 		return false;
4190 
4191 	return dpll_mgr->get_dplls(state, crtc, encoder);
4192 }
4193 
4194 /**
4195  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4196  * @state: atomic state
4197  * @crtc: crtc from which the DPLLs are to be released
4198  *
4199  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4200  * from the current atomic commit @state and the old @crtc atomic state.
4201  *
4202  * The new configuration in the atomic commit @state is made effective by
4203  * calling intel_shared_dpll_swap_state().
4204  */
4205 void intel_release_shared_dplls(struct intel_atomic_state *state,
4206 				struct intel_crtc *crtc)
4207 {
4208 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4209 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4210 
4211 	/*
4212 	 * FIXME: this function is called for every platform having a
4213 	 * compute_clock hook, even though the platform doesn't yet support
4214 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4215 	 * called on those.
4216 	 */
4217 	if (!dpll_mgr)
4218 		return;
4219 
4220 	dpll_mgr->put_dplls(state, crtc);
4221 }
4222 
4223 /**
4224  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4225  * @state: atomic state
4226  * @crtc: the CRTC for which to update the active DPLL
4227  * @encoder: encoder determining the type of port DPLL
4228  *
4229  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4230  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4231  * DPLL selected will be based on the current mode of the encoder's port.
4232  */
4233 void intel_update_active_dpll(struct intel_atomic_state *state,
4234 			      struct intel_crtc *crtc,
4235 			      struct intel_encoder *encoder)
4236 {
4237 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4238 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4239 
4240 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4241 		return;
4242 
4243 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4244 }
4245 
4246 /**
4247  * intel_dpll_get_freq - calculate the DPLL's output frequency
4248  * @i915: i915 device
4249  * @pll: DPLL for which to calculate the output frequency
4250  * @pll_state: DPLL state from which to calculate the output frequency
4251  *
4252  * Return the output frequency corresponding to @pll's passed in @pll_state.
4253  */
4254 int intel_dpll_get_freq(struct drm_i915_private *i915,
4255 			const struct intel_shared_dpll *pll,
4256 			const struct intel_dpll_hw_state *pll_state)
4257 {
4258 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4259 		return 0;
4260 
4261 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4262 }
4263 
4264 /**
4265  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4266  * @i915: i915 device
4267  * @pll: DPLL for which to calculate the output frequency
4268  * @hw_state: DPLL's hardware state
4269  *
4270  * Read out @pll's hardware state into @hw_state.
4271  */
4272 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4273 			     struct intel_shared_dpll *pll,
4274 			     struct intel_dpll_hw_state *hw_state)
4275 {
4276 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4277 }
4278 
4279 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4280 				  struct intel_shared_dpll *pll)
4281 {
4282 	struct intel_crtc *crtc;
4283 
4284 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4285 
4286 	if (IS_JSL_EHL(i915) && pll->on &&
4287 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4288 		pll->wakeref = intel_display_power_get(i915,
4289 						       POWER_DOMAIN_DC_OFF);
4290 	}
4291 
4292 	pll->state.pipe_mask = 0;
4293 	for_each_intel_crtc(&i915->drm, crtc) {
4294 		struct intel_crtc_state *crtc_state =
4295 			to_intel_crtc_state(crtc->base.state);
4296 
4297 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4298 			pll->state.pipe_mask |= BIT(crtc->pipe);
4299 	}
4300 	pll->active_mask = pll->state.pipe_mask;
4301 
4302 	drm_dbg_kms(&i915->drm,
4303 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4304 		    pll->info->name, pll->state.pipe_mask, pll->on);
4305 }
4306 
4307 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4308 {
4309 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4310 		i915->dpll.mgr->update_ref_clks(i915);
4311 }
4312 
4313 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4314 {
4315 	int i;
4316 
4317 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4318 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4319 }
4320 
/*
 * Sanitize a single PLL's state after readout: apply the ADL-P CMTG
 * clock gating workaround to any enabled PLL, then disable PLLs that
 * are enabled in hardware but not used by any active pipe.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Applied to every enabled PLL, including ones that stay in use. */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4339 
4340 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4341 {
4342 	int i;
4343 
4344 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4345 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4346 }
4347 
4348 /**
4349  * intel_dpll_dump_hw_state - write hw_state to dmesg
4350  * @dev_priv: i915 drm device
4351  * @hw_state: hw state to be written to the log
4352  *
4353  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4354  */
4355 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4356 			      const struct intel_dpll_hw_state *hw_state)
4357 {
4358 	if (dev_priv->dpll.mgr) {
4359 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4360 	} else {
4361 		/* fallback for platforms that don't use the shared dpll
4362 		 * infrastructure
4363 		 */
4364 		drm_dbg_kms(&dev_priv->drm,
4365 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4366 			    "fp0: 0x%x, fp1: 0x%x\n",
4367 			    hw_state->dpll,
4368 			    hw_state->dpll_md,
4369 			    hw_state->fp0,
4370 			    hw_state->fp1);
4371 	}
4372 }
4373