1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_de.h"
25 #include "intel_display_types.h"
26 #include "intel_dpio_phy.h"
27 #include "intel_dpll.h"
28 #include "intel_dpll_mgr.h"
29 #include "intel_pch_refclk.h"
30 #include "intel_tc.h"
31 #include "intel_tc_phy_regs.h"
32 
33 /**
34  * DOC: Display PLLs
35  *
36  * Display PLLs used for driving outputs vary by platform. While some have
37  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
38  * from a pool. In the latter scenario, it is possible that multiple pipes
39  * share a PLL if their configurations match.
40  *
41  * This file provides an abstraction over display PLLs. The function
42  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
43  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
45  * for a given CRTC and encoder configuration by calling
46  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
47  * with intel_release_shared_dplls().
48  * Changes to the users are first staged in the atomic state, and then made
49  * effective by calling intel_shared_dpll_swap_state() during the atomic
50  * commit phase.
51  */
52 
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true if the pll is
	 * currently enabled (implementations in this file check the
	 * PLL's enable bit), false otherwise or if the display power
	 * well providing access to the registers is down.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. The implementations visible here return the frequency
	 * in kHz (see e.g. hsw_ddi_wrpll_get_freq()).
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
87 
/*
 * Platform specific table of DPLLs plus hooks for managing them. Some
 * hooks are optional and may be left NULL (e.g. pch_pll_mgr does not set
 * update_active_dpll or update_ref_clks).
 */
struct intel_dpll_mgr {
	/* table of PLLs for the platform, terminated by an empty entry */
	const struct dpll_info *dpll_info;

	/*
	 * Reserve the PLL(s) needed by @crtc/@encoder in the atomic state
	 * and point the new crtc state at them; returns false when no
	 * suitable PLL is available.
	 */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* Release the PLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/*
	 * Optional: update which PLL is active for @crtc/@encoder;
	 * semantics are platform specific (no implementation is visible
	 * in this part of the file).
	 */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Optional: refresh the cached reference clock frequencies. */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Dump @hw_state to the kms debug log. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
103 
104 static void
105 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
106 				  struct intel_shared_dpll_state *shared_dpll)
107 {
108 	enum intel_dpll_id i;
109 
110 	/* Copy shared dpll state */
111 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
112 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
113 
114 		shared_dpll[i] = pll->state;
115 	}
116 }
117 
118 static struct intel_shared_dpll_state *
119 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
120 {
121 	struct intel_atomic_state *state = to_intel_atomic_state(s);
122 
123 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
124 
125 	if (!state->dpll_set) {
126 		state->dpll_set = true;
127 
128 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
129 						  state->shared_dpll);
130 	}
131 
132 	return state->shared_dpll;
133 }
134 
135 /**
136  * intel_get_shared_dpll_by_id - get a DPLL given its id
137  * @dev_priv: i915 device instance
138  * @id: pll id
139  *
140  * Returns:
141  * A pointer to the DPLL with @id
142  */
143 struct intel_shared_dpll *
144 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
145 			    enum intel_dpll_id id)
146 {
147 	return &dev_priv->dpll.shared_dplls[id];
148 }
149 
150 /**
151  * intel_get_shared_dpll_id - get the id of a DPLL
152  * @dev_priv: i915 device instance
153  * @pll: the DPLL
154  *
155  * Returns:
156  * The id of @pll
157  */
158 enum intel_dpll_id
159 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
160 			 struct intel_shared_dpll *pll)
161 {
162 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
163 
164 	if (drm_WARN_ON(&dev_priv->drm,
165 			pll_idx < 0 ||
166 			pll_idx >= dev_priv->dpll.num_shared_dpll))
167 		return -1;
168 
169 	return pll_idx;
170 }
171 
/* For ILK+ */
/*
 * Assert that @pll's hardware enable state matches the expected @state,
 * emitting an I915_STATE_WARN on mismatch. Warns (and bails) if called
 * with a NULL pll.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* Read back the actual enable state from the hardware. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
189 
190 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
191 {
192 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
193 }
194 
195 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
196 {
197 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
198 }
199 
200 static i915_reg_t
201 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
202 			   struct intel_shared_dpll *pll)
203 {
204 	if (IS_DG1(i915))
205 		return DG1_DPLL_ENABLE(pll->info->id);
206 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
207 		return MG_PLL_ENABLE(0);
208 
209 	return ICL_DPLL_ENABLE(pll->info->id);
210 }
211 
212 static i915_reg_t
213 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
214 			struct intel_shared_dpll *pll)
215 {
216 	const enum intel_dpll_id id = pll->info->id;
217 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
218 
219 	if (IS_ALDERLAKE_P(i915))
220 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
221 
222 	return MG_PLL_ENABLE(tc_port);
223 }
224 
225 /**
226  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
227  * @crtc_state: CRTC, and its state, which has a shared DPLL
228  *
229  * Enable the shared DPLL used by @crtc.
230  */
231 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
232 {
233 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
234 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
235 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
236 	unsigned int pipe_mask = BIT(crtc->pipe);
237 	unsigned int old_mask;
238 
239 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
240 		return;
241 
242 	mutex_lock(&dev_priv->dpll.lock);
243 	old_mask = pll->active_mask;
244 
245 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
246 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
247 		goto out;
248 
249 	pll->active_mask |= pipe_mask;
250 
251 	drm_dbg_kms(&dev_priv->drm,
252 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
253 		    pll->info->name, pll->active_mask, pll->on,
254 		    crtc->base.base.id, crtc->base.name);
255 
256 	if (old_mask) {
257 		drm_WARN_ON(&dev_priv->drm, !pll->on);
258 		assert_shared_dpll_enabled(dev_priv, pll);
259 		goto out;
260 	}
261 	drm_WARN_ON(&dev_priv->drm, pll->on);
262 
263 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
264 	pll->info->funcs->enable(dev_priv, pll);
265 	pll->on = true;
266 
267 out:
268 	mutex_unlock(&dev_priv->dpll.lock);
269 }
270 
271 /**
272  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
273  * @crtc_state: CRTC, and its state, which has a shared DPLL
274  *
275  * Disable the shared DPLL used by @crtc.
276  */
277 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
278 {
279 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
280 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
281 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
282 	unsigned int pipe_mask = BIT(crtc->pipe);
283 
284 	/* PCH only available on ILK+ */
285 	if (DISPLAY_VER(dev_priv) < 5)
286 		return;
287 
288 	if (pll == NULL)
289 		return;
290 
291 	mutex_lock(&dev_priv->dpll.lock);
292 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
293 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
294 		     crtc->base.base.id, crtc->base.name))
295 		goto out;
296 
297 	drm_dbg_kms(&dev_priv->drm,
298 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
299 		    pll->info->name, pll->active_mask, pll->on,
300 		    crtc->base.base.id, crtc->base.name);
301 
302 	assert_shared_dpll_enabled(dev_priv, pll);
303 	drm_WARN_ON(&dev_priv->drm, !pll->on);
304 
305 	pll->active_mask &= ~pipe_mask;
306 	if (pll->active_mask)
307 		goto out;
308 
309 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
310 	pll->info->funcs->disable(dev_priv, pll);
311 	pll->on = false;
312 
313 out:
314 	mutex_unlock(&dev_priv->dpll.lock);
315 }
316 
317 static struct intel_shared_dpll *
318 intel_find_shared_dpll(struct intel_atomic_state *state,
319 		       const struct intel_crtc *crtc,
320 		       const struct intel_dpll_hw_state *pll_state,
321 		       unsigned long dpll_mask)
322 {
323 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
324 	struct intel_shared_dpll *pll, *unused_pll = NULL;
325 	struct intel_shared_dpll_state *shared_dpll;
326 	enum intel_dpll_id i;
327 
328 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
329 
330 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
331 
332 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
333 		pll = &dev_priv->dpll.shared_dplls[i];
334 
335 		/* Only want to check enabled timings first */
336 		if (shared_dpll[i].pipe_mask == 0) {
337 			if (!unused_pll)
338 				unused_pll = pll;
339 			continue;
340 		}
341 
342 		if (memcmp(pll_state,
343 			   &shared_dpll[i].hw_state,
344 			   sizeof(*pll_state)) == 0) {
345 			drm_dbg_kms(&dev_priv->drm,
346 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
347 				    crtc->base.base.id, crtc->base.name,
348 				    pll->info->name,
349 				    shared_dpll[i].pipe_mask,
350 				    pll->active_mask);
351 			return pll;
352 		}
353 	}
354 
355 	/* Ok no matching timings, maybe there's a free one? */
356 	if (unused_pll) {
357 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
358 			    crtc->base.base.id, crtc->base.name,
359 			    unused_pll->info->name);
360 		return unused_pll;
361 	}
362 
363 	return NULL;
364 }
365 
366 static void
367 intel_reference_shared_dpll(struct intel_atomic_state *state,
368 			    const struct intel_crtc *crtc,
369 			    const struct intel_shared_dpll *pll,
370 			    const struct intel_dpll_hw_state *pll_state)
371 {
372 	struct drm_i915_private *i915 = to_i915(state->base.dev);
373 	struct intel_shared_dpll_state *shared_dpll;
374 	const enum intel_dpll_id id = pll->info->id;
375 
376 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
377 
378 	if (shared_dpll[id].pipe_mask == 0)
379 		shared_dpll[id].hw_state = *pll_state;
380 
381 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
382 		pipe_name(crtc->pipe));
383 
384 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
385 }
386 
387 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
388 					  const struct intel_crtc *crtc,
389 					  const struct intel_shared_dpll *pll)
390 {
391 	struct intel_shared_dpll_state *shared_dpll;
392 
393 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
394 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
395 }
396 
397 static void intel_put_dpll(struct intel_atomic_state *state,
398 			   struct intel_crtc *crtc)
399 {
400 	const struct intel_crtc_state *old_crtc_state =
401 		intel_atomic_get_old_crtc_state(state, crtc);
402 	struct intel_crtc_state *new_crtc_state =
403 		intel_atomic_get_new_crtc_state(state, crtc);
404 
405 	new_crtc_state->shared_dpll = NULL;
406 
407 	if (!old_crtc_state->shared_dpll)
408 		return;
409 
410 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
411 }
412 
413 /**
414  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
415  * @state: atomic state
416  *
417  * This is the dpll version of drm_atomic_helper_swap_state() since the
418  * helper does not handle driver-specific global state.
419  *
420  * For consistency with atomic helpers this function does a complete swap,
421  * i.e. it also puts the current state into @state, even though there is no
422  * need for that at this moment.
423  */
424 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
425 {
426 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
427 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
428 	enum intel_dpll_id i;
429 
430 	if (!state->dpll_set)
431 		return;
432 
433 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
434 		struct intel_shared_dpll *pll =
435 			&dev_priv->dpll.shared_dplls[i];
436 
437 		swap(pll->state, shared_dpll[i]);
438 	}
439 }
440 
441 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
442 				      struct intel_shared_dpll *pll,
443 				      struct intel_dpll_hw_state *hw_state)
444 {
445 	const enum intel_dpll_id id = pll->info->id;
446 	intel_wakeref_t wakeref;
447 	u32 val;
448 
449 	wakeref = intel_display_power_get_if_enabled(dev_priv,
450 						     POWER_DOMAIN_DISPLAY_CORE);
451 	if (!wakeref)
452 		return false;
453 
454 	val = intel_de_read(dev_priv, PCH_DPLL(id));
455 	hw_state->dpll = val;
456 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
457 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
458 
459 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
460 
461 	return val & DPLL_VCO_ENABLE;
462 }
463 
/*
 * Warn if the PCH reference clock is not enabled: at least one of the
 * SSC/non-spread/superspread source fields in PCH_DREF_CONTROL must be
 * non-zero. Only meaningful on IBX/CPT PCHs (asserted below).
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
476 
/*
 * Program and enable a PCH DPLL from the pll's software state: write the
 * FP dividers and the DPLL control value, wait for the clocks to settle,
 * then rewrite the DPLL register so the pixel multiplier takes effect.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Program the pre-computed FP0/FP1 divider values. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
503 
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* Settle delay; mirrors the 200us wait on the enable side. */
	udelay(200);
}
513 
514 static bool ibx_get_dpll(struct intel_atomic_state *state,
515 			 struct intel_crtc *crtc,
516 			 struct intel_encoder *encoder)
517 {
518 	struct intel_crtc_state *crtc_state =
519 		intel_atomic_get_new_crtc_state(state, crtc);
520 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
521 	struct intel_shared_dpll *pll;
522 	enum intel_dpll_id i;
523 
524 	if (HAS_PCH_IBX(dev_priv)) {
525 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
526 		i = (enum intel_dpll_id) crtc->pipe;
527 		pll = &dev_priv->dpll.shared_dplls[i];
528 
529 		drm_dbg_kms(&dev_priv->drm,
530 			    "[CRTC:%d:%s] using pre-allocated %s\n",
531 			    crtc->base.base.id, crtc->base.name,
532 			    pll->info->name);
533 	} else {
534 		pll = intel_find_shared_dpll(state, crtc,
535 					     &crtc_state->dpll_hw_state,
536 					     BIT(DPLL_ID_PCH_PLL_B) |
537 					     BIT(DPLL_ID_PCH_PLL_A));
538 	}
539 
540 	if (!pll)
541 		return false;
542 
543 	/* reference the pll */
544 	intel_reference_shared_dpll(state, crtc,
545 				    pll, &crtc_state->dpll_hw_state);
546 
547 	crtc_state->shared_dpll = pll;
548 
549 	return true;
550 }
551 
552 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
553 			      const struct intel_dpll_hw_state *hw_state)
554 {
555 	drm_dbg_kms(&dev_priv->drm,
556 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
557 		    "fp0: 0x%x, fp1: 0x%x\n",
558 		    hw_state->dpll,
559 		    hw_state->dpll_md,
560 		    hw_state->fp0,
561 		    hw_state->fp1);
562 }
563 
/* PCH DPLL hooks; no get_freq implementation is provided. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
569 
/* The two PCH DPLLs, terminated by an empty sentinel entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
575 
/* PLL manager for PCH platforms; update_* hooks are intentionally unset. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
582 
/* Enable a WRPLL by programming its pre-computed control register value. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* Brief settle delay after enabling. */
	udelay(20);
}
592 
/* Enable the SPLL by programming its pre-computed control register value. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* Brief settle delay after enabling. */
	udelay(20);
}
600 
/*
 * Disable a WRPLL by clearing only its enable bit, leaving the divider
 * configuration intact; possibly reconfigure the PCH refclk afterwards.
 */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
618 
/*
 * Disable the SPLL by clearing only its enable bit; possibly reconfigure
 * the PCH refclk afterwards.
 */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
636 
637 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
638 				       struct intel_shared_dpll *pll,
639 				       struct intel_dpll_hw_state *hw_state)
640 {
641 	const enum intel_dpll_id id = pll->info->id;
642 	intel_wakeref_t wakeref;
643 	u32 val;
644 
645 	wakeref = intel_display_power_get_if_enabled(dev_priv,
646 						     POWER_DOMAIN_DISPLAY_CORE);
647 	if (!wakeref)
648 		return false;
649 
650 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
651 	hw_state->wrpll = val;
652 
653 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
654 
655 	return val & WRPLL_PLL_ENABLE;
656 }
657 
658 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
659 				      struct intel_shared_dpll *pll,
660 				      struct intel_dpll_hw_state *hw_state)
661 {
662 	intel_wakeref_t wakeref;
663 	u32 val;
664 
665 	wakeref = intel_display_power_get_if_enabled(dev_priv,
666 						     POWER_DOMAIN_DISPLAY_CORE);
667 	if (!wakeref)
668 		return false;
669 
670 	val = intel_de_read(dev_priv, SPLL_CTL);
671 	hw_state->spll = val;
672 
673 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
674 
675 	return val & SPLL_PLL_ENABLE;
676 }
677 
/* LCPLL reference frequency, in MHz */
#define LC_FREQ 2700
/* LC_FREQ in the fixed-point units used by the WRPLL search below */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (p) search range and step, see hsw_ddi_calculate_wrpll() */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* One candidate divider triple; n2/r2 are the doubled N/R dividers. */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
694 
/*
 * Return the PPM error budget allowed for the given pixel clock (Hz).
 * Well-known video rates demand an exact match (budget 0); a few odd
 * rates tolerate larger errors; everything else defaults to 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	switch (clock) {
	case 25175000:
	case 25200000:
	case 27000000:
	case 27027000:
	case 37762500:
	case 37800000:
	case 40500000:
	case 40541000:
	case 54000000:
	case 54054000:
	case 59341000:
	case 59400000:
	case 72000000:
	case 74176000:
	case 74250000:
	case 81000000:
	case 81081000:
	case 89012000:
	case 89100000:
	case 108000000:
	case 108108000:
	case 111264000:
	case 111375000:
	case 148352000:
	case 148500000:
	case 162000000:
	case 162162000:
	case 222525000:
	case 222750000:
	case 296703000:
	case 297000000:
		return 0;
	case 233500000:
	case 245250000:
	case 247750000:
	case 253250000:
	case 298000000:
		return 1500;
	case 169128000:
	case 169500000:
	case 179500000:
	case 202000000:
		return 2000;
	case 256250000:
	case 262500000:
	case 270000000:
	case 272500000:
	case 273750000:
	case 280750000:
	case 281250000:
	case 286000000:
	case 291750000:
		return 4000;
	case 267250000:
	case 268500000:
		return 5000;
	default:
		return 1000;
	}
}
768 
/*
 * Compare the candidate divider triple (r2, n2, p) against the current
 * *best and keep whichever one tracks @freq2k better according to the
 * PPM @budget rules described inline. The first candidate always wins.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * a/b: budget scaled for the candidate/best; c/d: 1e6 * absolute
	 * error for the candidate/best. All cross-multiplied to avoid
	 * divisions ("a < c" means candidate is over budget, etc.).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
828 
/*
 * Exhaustively search the (r2, n2, p) divider space for the combination
 * that best matches @clock, honoring the REF/VCO constraints derived
 * inline. The r2/n2 outputs are the doubled R/N dividers.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Try every post divider, keeping the best match. */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
893 
894 static struct intel_shared_dpll *
895 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
896 		       struct intel_crtc *crtc)
897 {
898 	struct intel_crtc_state *crtc_state =
899 		intel_atomic_get_new_crtc_state(state, crtc);
900 	struct intel_shared_dpll *pll;
901 	u32 val;
902 	unsigned int p, n2, r2;
903 
904 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
905 
906 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
907 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
908 	      WRPLL_DIVIDER_POST(p);
909 
910 	crtc_state->dpll_hw_state.wrpll = val;
911 
912 	pll = intel_find_shared_dpll(state, crtc,
913 				     &crtc_state->dpll_hw_state,
914 				     BIT(DPLL_ID_WRPLL2) |
915 				     BIT(DPLL_ID_WRPLL1));
916 
917 	if (!pll)
918 		return NULL;
919 
920 	return pll;
921 }
922 
/*
 * Calculate a WRPLL's output frequency, in kHz, from the programmed
 * control value in @pll_state. Returns 0 on an unknown reference select.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;	/* reference clock, in kHz */
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	/* Decode the reference clock source from the control value. */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the control value. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
962 
963 static struct intel_shared_dpll *
964 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
965 {
966 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
967 	struct intel_shared_dpll *pll;
968 	enum intel_dpll_id pll_id;
969 	int clock = crtc_state->port_clock;
970 
971 	switch (clock / 2) {
972 	case 81000:
973 		pll_id = DPLL_ID_LCPLL_810;
974 		break;
975 	case 135000:
976 		pll_id = DPLL_ID_LCPLL_1350;
977 		break;
978 	case 270000:
979 		pll_id = DPLL_ID_LCPLL_2700;
980 		break;
981 	default:
982 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
983 			    clock);
984 		return NULL;
985 	}
986 
987 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
988 
989 	if (!pll)
990 		return NULL;
991 
992 	return pll;
993 }
994 
995 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
996 				  const struct intel_shared_dpll *pll,
997 				  const struct intel_dpll_hw_state *pll_state)
998 {
999 	int link_clock = 0;
1000 
1001 	switch (pll->info->id) {
1002 	case DPLL_ID_LCPLL_810:
1003 		link_clock = 81000;
1004 		break;
1005 	case DPLL_ID_LCPLL_1350:
1006 		link_clock = 135000;
1007 		break;
1008 	case DPLL_ID_LCPLL_2700:
1009 		link_clock = 270000;
1010 		break;
1011 	default:
1012 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1013 		break;
1014 	}
1015 
1016 	return link_clock * 2;
1017 }
1018 
1019 static struct intel_shared_dpll *
1020 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1021 		      struct intel_crtc *crtc)
1022 {
1023 	struct intel_crtc_state *crtc_state =
1024 		intel_atomic_get_new_crtc_state(state, crtc);
1025 
1026 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1027 		return NULL;
1028 
1029 	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
1030 					 SPLL_REF_MUXED_SSC;
1031 
1032 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1033 				      BIT(DPLL_ID_SPLL));
1034 }
1035 
1036 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1037 				 const struct intel_shared_dpll *pll,
1038 				 const struct intel_dpll_hw_state *pll_state)
1039 {
1040 	int link_clock = 0;
1041 
1042 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1043 	case SPLL_FREQ_810MHz:
1044 		link_clock = 81000;
1045 		break;
1046 	case SPLL_FREQ_1350MHz:
1047 		link_clock = 135000;
1048 		break;
1049 	case SPLL_FREQ_2700MHz:
1050 		link_clock = 270000;
1051 		break;
1052 	default:
1053 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1054 		break;
1055 	}
1056 
1057 	return link_clock * 2;
1058 }
1059 
1060 static bool hsw_get_dpll(struct intel_atomic_state *state,
1061 			 struct intel_crtc *crtc,
1062 			 struct intel_encoder *encoder)
1063 {
1064 	struct intel_crtc_state *crtc_state =
1065 		intel_atomic_get_new_crtc_state(state, crtc);
1066 	struct intel_shared_dpll *pll;
1067 
1068 	memset(&crtc_state->dpll_hw_state, 0,
1069 	       sizeof(crtc_state->dpll_hw_state));
1070 
1071 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1072 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1073 	else if (intel_crtc_has_dp_encoder(crtc_state))
1074 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1075 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1076 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1077 	else
1078 		return false;
1079 
1080 	if (!pll)
1081 		return false;
1082 
1083 	intel_reference_shared_dpll(state, crtc,
1084 				    pll, &crtc_state->dpll_hw_state);
1085 
1086 	crtc_state->shared_dpll = pll;
1087 
1088 	return true;
1089 }
1090 
1091 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1092 {
1093 	i915->dpll.ref_clks.ssc = 135000;
1094 	/* Non-SSC is only used on non-ULT HSW. */
1095 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1096 		i915->dpll.ref_clks.nssc = 24000;
1097 	else
1098 		i915->dpll.ref_clks.nssc = 135000;
1099 }
1100 
/* Log the cached HSW WRPLL/SPLL register values for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1107 
/* Hooks for the programmable WRPLLs (used for HDMI, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

/* Hooks for the SPLL (used for analog/CRT, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1121 
/*
 * The LCPLLs are flagged INTEL_DPLL_ALWAYS_ON in hsw_plls[], so the
 * enable/disable hooks intentionally do nothing.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

/* An always-on LCPLL is reported as enabled; there is no state to read back. */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}

/* Hooks shared by the three fixed LCPLL link rates. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1145 
/*
 * HSW PLL pool: two programmable WRPLLs, one SPLL, and the three
 * always-on fixed LCPLL link rates.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};

/* Platform glue selecting the HSW PLL pool and hooks. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1163 
/* Per-PLL control and configuration registers for the SKL DPLLs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1194 
1195 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
1196 				    struct intel_shared_dpll *pll)
1197 {
1198 	const enum intel_dpll_id id = pll->info->id;
1199 	u32 val;
1200 
1201 	val = intel_de_read(dev_priv, DPLL_CTRL1);
1202 
1203 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1204 		 DPLL_CTRL1_SSC(id) |
1205 		 DPLL_CTRL1_LINK_RATE_MASK(id));
1206 	val |= pll->state.hw_state.ctrl1 << (id * 6);
1207 
1208 	intel_de_write(dev_priv, DPLL_CTRL1, val);
1209 	intel_de_posting_read(dev_priv, DPLL_CTRL1);
1210 }
1211 
/*
 * Enable DPLL 1-3: program CTRL1 and CFGCR1/2 from the cached state,
 * set the enable bit and wait for the PLL to lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}

/*
 * DPLL0 is already running (it drives CDCLK, see
 * skl_ddi_dpll0_get_hw_state()), so only its CTRL1 field is programmed.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1238 
/* Disable DPLL 1-3 by clearing the enable bit in its control register. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}

/* DPLL0 drives CDCLK and must stay running; never disable it here. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1255 
/*
 * Read back the state of DPLL 1-3 into @hw_state.
 *
 * Returns true iff the display power could be grabbed and the PLL's
 * enable bit is set; cfgcr1/2 are only read back in HDMI mode.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* This PLL's 6-bit field within the shared CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1292 
/*
 * Read back the DPLL0 state. DPLL0 is expected to always be enabled
 * (it drives CDCLK), so a disabled PLL triggers a WARN; only the CTRL1
 * field is read back since DPLL0 has no HDMI mode.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1325 
/* Running best-candidate state for the WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1332 
1333 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1334 {
1335 	memset(ctx, 0, sizeof(*ctx));
1336 
1337 	ctx->min_deviation = U64_MAX;
1338 }
1339 
1340 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1341 #define SKL_DCO_MAX_PDEVIATION	100
1342 #define SKL_DCO_MAX_NDEVIATION	600
1343 
1344 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1345 				  u64 central_freq,
1346 				  u64 dco_freq,
1347 				  unsigned int divider)
1348 {
1349 	u64 deviation;
1350 
1351 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1352 			      central_freq);
1353 
1354 	/* positive deviation */
1355 	if (dco_freq >= central_freq) {
1356 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1357 		    deviation < ctx->min_deviation) {
1358 			ctx->min_deviation = deviation;
1359 			ctx->central_freq = central_freq;
1360 			ctx->dco_freq = dco_freq;
1361 			ctx->p = divider;
1362 		}
1363 	/* negative deviation */
1364 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1365 		   deviation < ctx->min_deviation) {
1366 		ctx->min_deviation = deviation;
1367 		ctx->central_freq = central_freq;
1368 		ctx->dco_freq = dco_freq;
1369 		ctx->p = divider;
1370 	}
1371 }
1372 
/*
 * Decompose the overall post divider @p into the WRPLL's P0 (pre), P1
 * (Q ratio) and P2 (K) divider factors. Outputs are left untouched for
 * a @p outside the supported divider lists.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	/* even dividers */
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* Small halves map directly onto P2. */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1421 
/* Raw field values to be programmed into DPLL_CFGCR1/2. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional part of the DCO multiplier */
	u32 dco_integer;	/* integer part of the DCO multiplier */
	u32 qdiv_ratio;		/* Q divider ratio (P1) */
	u32 qdiv_mode;		/* 1 if the Q divider is active (ratio != 1) */
	u32 kdiv;		/* encoded K divider (P2) */
	u32 pdiv;		/* encoded pre divider (P0) */
	u32 central_freq;	/* encoded DCO central frequency selection */
};
1431 
1432 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1433 				      u64 afe_clock,
1434 				      int ref_clock,
1435 				      u64 central_freq,
1436 				      u32 p0, u32 p1, u32 p2)
1437 {
1438 	u64 dco_freq;
1439 
1440 	switch (central_freq) {
1441 	case 9600000000ULL:
1442 		params->central_freq = 0;
1443 		break;
1444 	case 9000000000ULL:
1445 		params->central_freq = 1;
1446 		break;
1447 	case 8400000000ULL:
1448 		params->central_freq = 3;
1449 	}
1450 
1451 	switch (p0) {
1452 	case 1:
1453 		params->pdiv = 0;
1454 		break;
1455 	case 2:
1456 		params->pdiv = 1;
1457 		break;
1458 	case 3:
1459 		params->pdiv = 2;
1460 		break;
1461 	case 7:
1462 		params->pdiv = 4;
1463 		break;
1464 	default:
1465 		WARN(1, "Incorrect PDiv\n");
1466 	}
1467 
1468 	switch (p2) {
1469 	case 5:
1470 		params->kdiv = 0;
1471 		break;
1472 	case 2:
1473 		params->kdiv = 1;
1474 		break;
1475 	case 3:
1476 		params->kdiv = 2;
1477 		break;
1478 	case 1:
1479 		params->kdiv = 3;
1480 		break;
1481 	default:
1482 		WARN(1, "Incorrect KDiv\n");
1483 	}
1484 
1485 	params->qdiv_ratio = p1;
1486 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1487 
1488 	dco_freq = p0 * p1 * p2 * afe_clock;
1489 
1490 	/*
1491 	 * Intermediate values are in Hz.
1492 	 * Divide by MHz to match bsepc
1493 	 */
1494 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1495 	params->dco_fraction =
1496 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1497 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1498 }
1499 
/*
 * Find WRPLL dividers for @clock: try every allowed post divider against
 * each DCO central frequency, keeping the candidate with the smallest
 * deviation (even dividers are preferred over odd ones), then expand the
 * winning divider into P0/P1/P2 and fill @wrpll_params.
 *
 * Returns false if no divider yields a DCO frequency within the allowed
 * deviation window.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1573 
1574 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1575 {
1576 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1577 	u32 ctrl1, cfgcr1, cfgcr2;
1578 	struct skl_wrpll_params wrpll_params = { 0, };
1579 
1580 	/*
1581 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1582 	 * as the DPLL id in this function.
1583 	 */
1584 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1585 
1586 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1587 
1588 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1589 				     i915->dpll.ref_clks.nssc,
1590 				     &wrpll_params))
1591 		return false;
1592 
1593 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1594 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1595 		wrpll_params.dco_integer;
1596 
1597 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1598 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1599 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1600 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1601 		wrpll_params.central_freq;
1602 
1603 	memset(&crtc_state->dpll_hw_state, 0,
1604 	       sizeof(crtc_state->dpll_hw_state));
1605 
1606 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1607 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1608 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1609 	return true;
1610 }
1611 
/*
 * Compute the pixel clock (in kHz) produced by a SKL WRPLL from its
 * cached cfgcr1/2 values: decode P0/P1/P2, reconstruct the DCO
 * frequency, then divide by the overall divider and the 5x AFE factor.
 * Returns 0 for an invalid divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Q divider only contributes when qdiv_mode is set. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO = integer part * ref + (15-bit fraction / 0x8000) * ref. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1682 
1683 static bool
1684 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1685 {
1686 	u32 ctrl1;
1687 
1688 	/*
1689 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1690 	 * as the DPLL id in this function.
1691 	 */
1692 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1693 	switch (crtc_state->port_clock / 2) {
1694 	case 81000:
1695 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1696 		break;
1697 	case 135000:
1698 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1699 		break;
1700 	case 270000:
1701 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1702 		break;
1703 		/* eDP 1.4 rates */
1704 	case 162000:
1705 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1706 		break;
1707 	case 108000:
1708 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1709 		break;
1710 	case 216000:
1711 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1712 		break;
1713 	}
1714 
1715 	memset(&crtc_state->dpll_hw_state, 0,
1716 	       sizeof(crtc_state->dpll_hw_state));
1717 
1718 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1719 
1720 	return true;
1721 }
1722 
1723 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1724 				  const struct intel_shared_dpll *pll,
1725 				  const struct intel_dpll_hw_state *pll_state)
1726 {
1727 	int link_clock = 0;
1728 
1729 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1730 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1731 	case DPLL_CTRL1_LINK_RATE_810:
1732 		link_clock = 81000;
1733 		break;
1734 	case DPLL_CTRL1_LINK_RATE_1080:
1735 		link_clock = 108000;
1736 		break;
1737 	case DPLL_CTRL1_LINK_RATE_1350:
1738 		link_clock = 135000;
1739 		break;
1740 	case DPLL_CTRL1_LINK_RATE_1620:
1741 		link_clock = 162000;
1742 		break;
1743 	case DPLL_CTRL1_LINK_RATE_2160:
1744 		link_clock = 216000;
1745 		break;
1746 	case DPLL_CTRL1_LINK_RATE_2700:
1747 		link_clock = 270000;
1748 		break;
1749 	default:
1750 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1751 		break;
1752 	}
1753 
1754 	return link_clock * 2;
1755 }
1756 
1757 static bool skl_get_dpll(struct intel_atomic_state *state,
1758 			 struct intel_crtc *crtc,
1759 			 struct intel_encoder *encoder)
1760 {
1761 	struct intel_crtc_state *crtc_state =
1762 		intel_atomic_get_new_crtc_state(state, crtc);
1763 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1764 	struct intel_shared_dpll *pll;
1765 	bool bret;
1766 
1767 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1768 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1769 		if (!bret) {
1770 			drm_dbg_kms(&i915->drm,
1771 				    "Could not get HDMI pll dividers.\n");
1772 			return false;
1773 		}
1774 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1775 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1776 		if (!bret) {
1777 			drm_dbg_kms(&i915->drm,
1778 				    "Could not set DP dpll HW state.\n");
1779 			return false;
1780 		}
1781 	} else {
1782 		return false;
1783 	}
1784 
1785 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1786 		pll = intel_find_shared_dpll(state, crtc,
1787 					     &crtc_state->dpll_hw_state,
1788 					     BIT(DPLL_ID_SKL_DPLL0));
1789 	else
1790 		pll = intel_find_shared_dpll(state, crtc,
1791 					     &crtc_state->dpll_hw_state,
1792 					     BIT(DPLL_ID_SKL_DPLL3) |
1793 					     BIT(DPLL_ID_SKL_DPLL2) |
1794 					     BIT(DPLL_ID_SKL_DPLL1));
1795 	if (!pll)
1796 		return false;
1797 
1798 	intel_reference_shared_dpll(state, crtc,
1799 				    pll, &crtc_state->dpll_hw_state);
1800 
1801 	crtc_state->shared_dpll = pll;
1802 
1803 	return true;
1804 }
1805 
1806 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1807 				const struct intel_shared_dpll *pll,
1808 				const struct intel_dpll_hw_state *pll_state)
1809 {
1810 	/*
1811 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1812 	 * the internal shift for each field
1813 	 */
1814 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1815 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1816 	else
1817 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1818 }
1819 
/* SKL DPLLs use the CDCLK reference clock; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}

/* Log the cached SKL DPLL register values for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1835 
/* Hooks for the fully programmable DPLL 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* DPLL0 also drives CDCLK; only its CTRL1 field is ever (re)programmed. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* SKL PLL pool: always-on DPLL0 (eDP/CDCLK) plus DPLL 1-3. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};

/* Platform glue selecting the SKL PLL pool and hooks. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1865 
/*
 * Power up and program a BXT/GLK port PLL from the cached state, then
 * enable it and wait for lock.
 *
 * The PLL id maps 1:1 onto the DDI port; the PHY/channel pair is derived
 * from the port. On GLK the PLL power must additionally be enabled first,
 * and a DCC delay-range tweak is applied after locking. The register
 * write order below follows the hardware programming sequence — do not
 * reorder.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1986 
/*
 * Disable a BXT/GLK port PLL; on GLK additionally power the PLL down
 * and wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2009 
/*
 * Read back the BXT/GLK port PLL state into @hw_state, masking each
 * register down to the bits this driver programs.
 *
 * Returns true iff the display power could be grabbed and the PLL's
 * enable bit is set.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2089 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;	/* port clock in kHz */
	u32 p1;		/* post divider 1 */
	u32 p2;		/* post divider 2 */
	u32 m2_int;	/* feedback divider m2, integer part */
	u32 m2_frac;	/* feedback divider m2, 22-bit fractional part */
	bool m2_frac_en;	/* enable the fractional part of m2 */
	u32 n;		/* reference divider */

	int vco;	/* resulting VCO frequency in kHz */
};
2102 
/* pre-calculated values for DP linkrates */
/*	  clock  p1 p2 m2i  m2frac    en  n */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2113 
2114 static bool
2115 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2116 			  struct bxt_clk_div *clk_div)
2117 {
2118 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2119 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2120 	struct dpll best_clock;
2121 
2122 	/* Calculate HDMI div */
2123 	/*
2124 	 * FIXME: tie the following calculation into
2125 	 * i9xx_crtc_compute_clock
2126 	 */
2127 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2128 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2129 			crtc_state->port_clock,
2130 			pipe_name(crtc->pipe));
2131 		return false;
2132 	}
2133 
2134 	clk_div->p1 = best_clock.p1;
2135 	clk_div->p2 = best_clock.p2;
2136 	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2137 	clk_div->n = best_clock.n;
2138 	clk_div->m2_int = best_clock.m2 >> 22;
2139 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2140 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2141 
2142 	clk_div->vco = best_clock.vco;
2143 
2144 	return true;
2145 }
2146 
2147 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2148 				    struct bxt_clk_div *clk_div)
2149 {
2150 	int clock = crtc_state->port_clock;
2151 	int i;
2152 
2153 	*clk_div = bxt_dp_clk_val[0];
2154 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2155 		if (bxt_dp_clk_val[i].clock == clock) {
2156 			*clk_div = bxt_dp_clk_val[i];
2157 			break;
2158 		}
2159 	}
2160 
2161 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2162 }
2163 
/*
 * Translate the computed dividers in @clk_div into the register-level PLL
 * state stored in crtc_state->dpll_hw_state.  Returns false if the VCO
 * frequency is outside the supported ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Loop filter coefficients depend on the VCO frequency range. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		/* exactly 5.4 GHz gets its own coefficient set */
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane staggering delay scales inversely with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	/*
	 * m2_int, m2_frac, prop_coef and targ_cnt are OR'd in raw -
	 * presumably their fields start at bit 0 of the respective
	 * registers; verify against the PORT_PLL register definitions.
	 */
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2233 
2234 static bool
2235 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2236 {
2237 	struct bxt_clk_div clk_div = {};
2238 
2239 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2240 
2241 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2242 }
2243 
2244 static bool
2245 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2246 {
2247 	struct bxt_clk_div clk_div = {};
2248 
2249 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2250 
2251 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2252 }
2253 
2254 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2255 				const struct intel_shared_dpll *pll,
2256 				const struct intel_dpll_hw_state *pll_state)
2257 {
2258 	struct dpll clock;
2259 
2260 	clock.m1 = 2;
2261 	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
2262 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2263 		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
2264 	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
2265 	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
2266 	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
2267 
2268 	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
2269 }
2270 
2271 static bool bxt_get_dpll(struct intel_atomic_state *state,
2272 			 struct intel_crtc *crtc,
2273 			 struct intel_encoder *encoder)
2274 {
2275 	struct intel_crtc_state *crtc_state =
2276 		intel_atomic_get_new_crtc_state(state, crtc);
2277 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2278 	struct intel_shared_dpll *pll;
2279 	enum intel_dpll_id id;
2280 
2281 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
2282 	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
2283 		return false;
2284 
2285 	if (intel_crtc_has_dp_encoder(crtc_state) &&
2286 	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
2287 		return false;
2288 
2289 	/* 1:1 mapping between ports and PLLs */
2290 	id = (enum intel_dpll_id) encoder->port;
2291 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2292 
2293 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2294 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2295 
2296 	intel_reference_shared_dpll(state, crtc,
2297 				    pll, &crtc_state->dpll_hw_state);
2298 
2299 	crtc_state->shared_dpll = pll;
2300 
2301 	return true;
2302 }
2303 
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* Port PLL reference is 100 MHz for both SSC and non-SSC (kHz). */
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2310 
2311 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2312 			      const struct intel_dpll_hw_state *hw_state)
2313 {
2314 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2315 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2316 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2317 		    hw_state->ebb0,
2318 		    hw_state->ebb4,
2319 		    hw_state->pll0,
2320 		    hw_state->pll1,
2321 		    hw_state->pll2,
2322 		    hw_state->pll3,
2323 		    hw_state->pll6,
2324 		    hw_state->pll8,
2325 		    hw_state->pll9,
2326 		    hw_state->pll10,
2327 		    hw_state->pcsdw12);
2328 }
2329 
/* PLL operations shared by all three BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2336 
/* One PLL per DDI port (A-C); the id doubles as the port number. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2343 
/* DPLL manager hooks for BXT/GLK platforms. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2351 
/*
 * Split the best overall divider into its P/Q/K components.
 *
 * @bestdiv is always one of the values from the dividers[] table in
 * icl_calc_wrpll(); on such inputs exactly one branch below matches, so
 * the outputs are always written.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3/5/7 map directly, 9/15/21 use kdiv = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers, tried in the same priority order as before */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2390 
2391 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2392 				      u32 dco_freq, u32 ref_freq,
2393 				      int pdiv, int qdiv, int kdiv)
2394 {
2395 	u32 dco;
2396 
2397 	switch (kdiv) {
2398 	case 1:
2399 		params->kdiv = 1;
2400 		break;
2401 	case 2:
2402 		params->kdiv = 2;
2403 		break;
2404 	case 3:
2405 		params->kdiv = 4;
2406 		break;
2407 	default:
2408 		WARN(1, "Incorrect KDiv\n");
2409 	}
2410 
2411 	switch (pdiv) {
2412 	case 2:
2413 		params->pdiv = 1;
2414 		break;
2415 	case 3:
2416 		params->pdiv = 2;
2417 		break;
2418 	case 5:
2419 		params->pdiv = 4;
2420 		break;
2421 	case 7:
2422 		params->pdiv = 8;
2423 		break;
2424 	default:
2425 		WARN(1, "Incorrect PDiv\n");
2426 	}
2427 
2428 	WARN_ON(kdiv != 2 && qdiv != 1);
2429 
2430 	params->qdiv_ratio = qdiv;
2431 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2432 
2433 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2434 
2435 	params->dco_integer = dco >> 15;
2436 	params->dco_fraction = dco & 0x7fff;
2437 }
2438 
2439 /*
2440  * Display WA #22010492432: ehl, tgl, adl-p
2441  * Program half of the nominal DCO divider fraction value.
2442  */
2443 static bool
2444 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2445 {
2446 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2447 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2448 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
2449 		 i915->dpll.ref_clks.nssc == 38400;
2450 }
2451 
/* Maps a DP link rate to its pre-computed combo PHY PLL settings. */
struct icl_combo_pll_params {
	int clock;	/* port clock in kHz */
	struct skl_wrpll_params wrpll;	/* register-level PLL settings */
};
2456 
2457 /*
2458  * These values alrea already adjusted: they're the bits we write to the
2459  * registers, not the logical values.
2460  */
/* DP link rate (kHz) -> combo PHY PLL settings, 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2487 
2488 
/* DP link rate (kHz) -> combo PHY PLL settings, 19.2 MHz reference clock. */
/* Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2516 
/* Thunderbolt PLL settings for ICL, 24 MHz reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Thunderbolt PLL settings for ICL, 19.2 MHz (and 38.4 MHz) reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Thunderbolt PLL settings for TGL+, 19.2 MHz (and 38.4 MHz) reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Thunderbolt PLL settings for TGL+, 24 MHz reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2537 
2538 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2539 				  struct skl_wrpll_params *pll_params)
2540 {
2541 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2542 	const struct icl_combo_pll_params *params =
2543 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2544 		icl_dp_combo_pll_24MHz_values :
2545 		icl_dp_combo_pll_19_2MHz_values;
2546 	int clock = crtc_state->port_clock;
2547 	int i;
2548 
2549 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2550 		if (clock == params[i].clock) {
2551 			*pll_params = params[i].wrpll;
2552 			return true;
2553 		}
2554 	}
2555 
2556 	MISSING_CASE(clock);
2557 	return false;
2558 }
2559 
2560 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2561 			     struct skl_wrpll_params *pll_params)
2562 {
2563 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2564 
2565 	if (DISPLAY_VER(dev_priv) >= 12) {
2566 		switch (dev_priv->dpll.ref_clks.nssc) {
2567 		default:
2568 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2569 			fallthrough;
2570 		case 19200:
2571 		case 38400:
2572 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2573 			break;
2574 		case 24000:
2575 			*pll_params = tgl_tbt_pll_24MHz_values;
2576 			break;
2577 		}
2578 	} else {
2579 		switch (dev_priv->dpll.ref_clks.nssc) {
2580 		default:
2581 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2582 			fallthrough;
2583 		case 19200:
2584 		case 38400:
2585 			*pll_params = icl_tbt_pll_19_2MHz_values;
2586 			break;
2587 		case 24000:
2588 			*pll_params = icl_tbt_pll_24MHz_values;
2589 			break;
2590 		}
2591 	}
2592 
2593 	return true;
2594 }
2595 
/*
 * The TBT PLL has no single output frequency to report (see below), so this
 * hook should never be reached; it warns and returns 0 if it is.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2608 
2609 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2610 {
2611 	int ref_clock = i915->dpll.ref_clks.nssc;
2612 
2613 	/*
2614 	 * For ICL+, the spec states: if reference frequency is 38.4,
2615 	 * use 19.2 because the DPLL automatically divides that by 2.
2616 	 */
2617 	if (ref_clock == 38400)
2618 		ref_clock = 19200;
2619 
2620 	return ref_clock;
2621 }
2622 
2623 static bool
2624 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2625 	       struct skl_wrpll_params *wrpll_params)
2626 {
2627 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2628 	int ref_clock = icl_wrpll_ref_clock(i915);
2629 	u32 afe_clock = crtc_state->port_clock * 5;
2630 	u32 dco_min = 7998000;
2631 	u32 dco_max = 10000000;
2632 	u32 dco_mid = (dco_min + dco_max) / 2;
2633 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2634 					 18, 20, 24, 28, 30, 32,  36,  40,
2635 					 42, 44, 48, 50, 52, 54,  56,  60,
2636 					 64, 66, 68, 70, 72, 76,  78,  80,
2637 					 84, 88, 90, 92, 96, 98, 100, 102,
2638 					  3,  5,  7,  9, 15, 21 };
2639 	u32 dco, best_dco = 0, dco_centrality = 0;
2640 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2641 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2642 
2643 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2644 		dco = afe_clock * dividers[d];
2645 
2646 		if (dco <= dco_max && dco >= dco_min) {
2647 			dco_centrality = abs(dco - dco_mid);
2648 
2649 			if (dco_centrality < best_dco_centrality) {
2650 				best_dco_centrality = dco_centrality;
2651 				best_div = dividers[d];
2652 				best_dco = dco;
2653 			}
2654 		}
2655 	}
2656 
2657 	if (best_div == 0)
2658 		return false;
2659 
2660 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2661 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2662 				  pdiv, qdiv, kdiv);
2663 
2664 	return true;
2665 }
2666 
/*
 * Decode the combo PLL register state back into a port clock (kHz).
 * Inverse of icl_calc_dpll_state()/icl_wrpll_params_populate().
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv only divides when qdiv_mode is set; otherwise it's 1. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Translate the register encodings back to logical divider values. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo WA #22010492432: the register holds half the nominal value. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* fraction is in 1/2^15 units of the reference clock */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* Unrecognized divider encodings would leave p0/p2 as raw bits. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2727 
/*
 * Encode the logical WRPLL parameters into the CFGCR0/CFGCR1 register state
 * for an ICL+ combo PLL.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	/* WA #22010492432: program half the nominal DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* Clock source selection differs between TGL+ and ICL. */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
2752 
2753 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2754 				     u32 *target_dco_khz,
2755 				     struct intel_dpll_hw_state *state,
2756 				     bool is_dkl)
2757 {
2758 	u32 dco_min_freq, dco_max_freq;
2759 	int div1_vals[] = {7, 5, 3, 2};
2760 	unsigned int i;
2761 	int div2;
2762 
2763 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2764 	dco_max_freq = is_dp ? 8100000 : 10000000;
2765 
2766 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2767 		int div1 = div1_vals[i];
2768 
2769 		for (div2 = 10; div2 > 0; div2--) {
2770 			int dco = div1 * div2 * clock_khz * 5;
2771 			int a_divratio, tlinedrv, inputsel;
2772 			u32 hsdiv;
2773 
2774 			if (dco < dco_min_freq || dco > dco_max_freq)
2775 				continue;
2776 
2777 			if (div2 >= 2) {
2778 				/*
2779 				 * Note: a_divratio not matching TGL BSpec
2780 				 * algorithm but matching hardcoded values and
2781 				 * working on HW for DP alt-mode at least
2782 				 */
2783 				a_divratio = is_dp ? 10 : 5;
2784 				tlinedrv = is_dkl ? 1 : 2;
2785 			} else {
2786 				a_divratio = 5;
2787 				tlinedrv = 0;
2788 			}
2789 			inputsel = is_dp ? 0 : 1;
2790 
2791 			switch (div1) {
2792 			default:
2793 				MISSING_CASE(div1);
2794 				fallthrough;
2795 			case 2:
2796 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2797 				break;
2798 			case 3:
2799 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2800 				break;
2801 			case 5:
2802 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2803 				break;
2804 			case 7:
2805 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2806 				break;
2807 			}
2808 
2809 			*target_dco_khz = dco;
2810 
2811 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2812 
2813 			state->mg_clktop2_coreclkctl1 =
2814 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2815 
2816 			state->mg_clktop2_hsclkctl =
2817 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2818 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2819 				hsdiv |
2820 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2821 
2822 			return true;
2823 		}
2824 	}
2825 
2826 	return false;
2827 }
2828 
2829 /*
2830  * The specification for this function uses real numbers, so the math had to be
2831  * adapted to integer-only calculation, that's why it looks so different.
2832  */
2833 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2834 				  struct intel_dpll_hw_state *pll_state)
2835 {
2836 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2837 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
2838 	int clock = crtc_state->port_clock;
2839 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2840 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2841 	u32 prop_coeff, int_coeff;
2842 	u32 tdc_targetcnt, feedfwgain;
2843 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2844 	u64 tmp;
2845 	bool use_ssc = false;
2846 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2847 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2848 
2849 	memset(pll_state, 0, sizeof(*pll_state));
2850 
2851 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2852 				      pll_state, is_dkl)) {
2853 		drm_dbg_kms(&dev_priv->drm,
2854 			    "Failed to find divisors for clock %d\n", clock);
2855 		return false;
2856 	}
2857 
2858 	m1div = 2;
2859 	m2div_int = dco_khz / (refclk_khz * m1div);
2860 	if (m2div_int > 255) {
2861 		if (!is_dkl) {
2862 			m1div = 4;
2863 			m2div_int = dco_khz / (refclk_khz * m1div);
2864 		}
2865 
2866 		if (m2div_int > 255) {
2867 			drm_dbg_kms(&dev_priv->drm,
2868 				    "Failed to find mdiv for clock %d\n",
2869 				    clock);
2870 			return false;
2871 		}
2872 	}
2873 	m2div_rem = dco_khz % (refclk_khz * m1div);
2874 
2875 	tmp = (u64)m2div_rem * (1 << 22);
2876 	do_div(tmp, refclk_khz * m1div);
2877 	m2div_frac = tmp;
2878 
2879 	switch (refclk_khz) {
2880 	case 19200:
2881 		iref_ndiv = 1;
2882 		iref_trim = 28;
2883 		iref_pulse_w = 1;
2884 		break;
2885 	case 24000:
2886 		iref_ndiv = 1;
2887 		iref_trim = 25;
2888 		iref_pulse_w = 2;
2889 		break;
2890 	case 38400:
2891 		iref_ndiv = 2;
2892 		iref_trim = 28;
2893 		iref_pulse_w = 1;
2894 		break;
2895 	default:
2896 		MISSING_CASE(refclk_khz);
2897 		return false;
2898 	}
2899 
2900 	/*
2901 	 * tdc_res = 0.000003
2902 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2903 	 *
2904 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2905 	 * was supposed to be a division, but we rearranged the operations of
2906 	 * the formula to avoid early divisions so we don't multiply the
2907 	 * rounding errors.
2908 	 *
2909 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2910 	 * we also rearrange to work with integers.
2911 	 *
2912 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2913 	 * last division by 10.
2914 	 */
2915 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2916 
2917 	/*
2918 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2919 	 * 32 bits. That's not a problem since we round the division down
2920 	 * anyway.
2921 	 */
2922 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2923 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2924 
2925 	if (dco_khz >= 9000000) {
2926 		prop_coeff = 5;
2927 		int_coeff = 10;
2928 	} else {
2929 		prop_coeff = 4;
2930 		int_coeff = 8;
2931 	}
2932 
2933 	if (use_ssc) {
2934 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2935 		do_div(tmp, refclk_khz * m1div * 10000);
2936 		ssc_stepsize = tmp;
2937 
2938 		tmp = mul_u32_u32(dco_khz, 1000);
2939 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2940 	} else {
2941 		ssc_stepsize = 0;
2942 		ssc_steplen = 0;
2943 	}
2944 	ssc_steplog = 4;
2945 
2946 	/* write pll_state calculations */
2947 	if (is_dkl) {
2948 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2949 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2950 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2951 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2952 
2953 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2954 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2955 
2956 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2957 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2958 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2959 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2960 
2961 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2962 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2963 
2964 		pll_state->mg_pll_tdc_coldst_bias =
2965 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2966 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2967 
2968 	} else {
2969 		pll_state->mg_pll_div0 =
2970 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2971 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2972 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2973 
2974 		pll_state->mg_pll_div1 =
2975 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2976 			MG_PLL_DIV1_DITHER_DIV_2 |
2977 			MG_PLL_DIV1_NDIVRATIO(1) |
2978 			MG_PLL_DIV1_FBPREDIV(m1div);
2979 
2980 		pll_state->mg_pll_lf =
2981 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2982 			MG_PLL_LF_AFCCNTSEL_512 |
2983 			MG_PLL_LF_GAINCTRL(1) |
2984 			MG_PLL_LF_INT_COEFF(int_coeff) |
2985 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2986 
2987 		pll_state->mg_pll_frac_lock =
2988 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2989 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2990 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2991 			MG_PLL_FRAC_LOCK_DCODITHEREN |
2992 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2993 		if (use_ssc || m2div_rem > 0)
2994 			pll_state->mg_pll_frac_lock |=
2995 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2996 
2997 		pll_state->mg_pll_ssc =
2998 			(use_ssc ? MG_PLL_SSC_EN : 0) |
2999 			MG_PLL_SSC_TYPE(2) |
3000 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3001 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3002 			MG_PLL_SSC_FLLEN |
3003 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3004 
3005 		pll_state->mg_pll_tdc_coldst_bias =
3006 			MG_PLL_TDC_COLDST_COLDSTART |
3007 			MG_PLL_TDC_COLDST_IREFINT_EN |
3008 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3009 			MG_PLL_TDC_TDCOVCCORR_EN |
3010 			MG_PLL_TDC_TDCSEL(3);
3011 
3012 		pll_state->mg_pll_bias =
3013 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3014 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3015 			MG_PLL_BIAS_BIAS_BONUS(10) |
3016 			MG_PLL_BIAS_BIASCAL_EN |
3017 			MG_PLL_BIAS_CTRIM(12) |
3018 			MG_PLL_BIAS_VREF_RDAC(4) |
3019 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3020 
3021 		if (refclk_khz == 38400) {
3022 			pll_state->mg_pll_tdc_coldst_bias_mask =
3023 				MG_PLL_TDC_COLDST_COLDSTART;
3024 			pll_state->mg_pll_bias_mask = 0;
3025 		} else {
3026 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3027 			pll_state->mg_pll_bias_mask = -1U;
3028 		}
3029 
3030 		pll_state->mg_pll_tdc_coldst_bias &=
3031 			pll_state->mg_pll_tdc_coldst_bias_mask;
3032 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3033 	}
3034 
3035 	return true;
3036 }
3037 
/*
 * Compute the MG/DKL PHY PLL output frequency from the saved divider state.
 * The result is in the same units as the reference clock (ref_clks.nssc).
 * Returns 0 if the HSDIV ratio field holds an unknown encoding.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	/* MG/DKL PLLs run off the non-SSC reference clock. */
	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/*
	 * The feedback divider fields live in different registers on the
	 * Dekel PHY (display ver >= 12) vs the older MG PHY.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* Fractional feedback divider only counts when enabled. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* First post-divider, encoded in CLKTOP2_HSCLKCTL. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	/* Second post-divider. */
	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3109 
3110 /**
3111  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3112  * @crtc_state: state for the CRTC to select the DPLL for
3113  * @port_dpll_id: the active @port_dpll_id to select
3114  *
3115  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3116  * CRTC.
3117  */
3118 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3119 			      enum icl_port_dpll_id port_dpll_id)
3120 {
3121 	struct icl_port_dpll *port_dpll =
3122 		&crtc_state->icl_port_dplls[port_dpll_id];
3123 
3124 	crtc_state->shared_dpll = port_dpll->pll;
3125 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3126 }
3127 
3128 static void icl_update_active_dpll(struct intel_atomic_state *state,
3129 				   struct intel_crtc *crtc,
3130 				   struct intel_encoder *encoder)
3131 {
3132 	struct intel_crtc_state *crtc_state =
3133 		intel_atomic_get_new_crtc_state(state, crtc);
3134 	struct intel_digital_port *primary_port;
3135 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3136 
3137 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3138 		enc_to_mst(encoder)->primary :
3139 		enc_to_dig_port(encoder);
3140 
3141 	if (primary_port &&
3142 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3143 	     intel_tc_port_in_legacy_mode(primary_port)))
3144 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3145 
3146 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3147 }
3148 
3149 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3150 {
3151 	if (!(i915->hti_state & HDPORT_ENABLED))
3152 		return 0;
3153 
3154 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3155 }
3156 
/*
 * Reserve a combo PHY DPLL for the CRTC: compute the PLL parameters for the
 * output type, build the per-platform set of candidate PLLs, and grab a
 * matching shared DPLL from that set. Returns false if the parameters can't
 * be computed or no PLL is available.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need a WRPLL configuration, DP uses fixed link rates. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* Candidate PLL set differs per platform (and per port on DG1/EHL). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* DG1: ports D/E use DPLL2/3, other ports DPLL0/1. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		/* JSL/EHL: DPLL4 is a candidate only for non-A ports. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3236 
/*
 * Reserve both DPLLs needed by a TypeC port: the TBT PLL (default slot) and
 * the port-specific MG PHY PLL. If the MG PHY PLL can't be computed or
 * found, the already-taken TBT PLL reference is dropped again.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First the TBT PLL, stored in the default slot. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then the MG PHY PLL dedicated to this TC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Roll back the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3297 
3298 static bool icl_get_dplls(struct intel_atomic_state *state,
3299 			  struct intel_crtc *crtc,
3300 			  struct intel_encoder *encoder)
3301 {
3302 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3303 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3304 
3305 	if (intel_phy_is_combo(dev_priv, phy))
3306 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3307 	else if (intel_phy_is_tc(dev_priv, phy))
3308 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3309 
3310 	MISSING_CASE(phy);
3311 
3312 	return false;
3313 }
3314 
3315 static void icl_put_dplls(struct intel_atomic_state *state,
3316 			  struct intel_crtc *crtc)
3317 {
3318 	const struct intel_crtc_state *old_crtc_state =
3319 		intel_atomic_get_old_crtc_state(state, crtc);
3320 	struct intel_crtc_state *new_crtc_state =
3321 		intel_atomic_get_new_crtc_state(state, crtc);
3322 	enum icl_port_dpll_id id;
3323 
3324 	new_crtc_state->shared_dpll = NULL;
3325 
3326 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3327 		const struct icl_port_dpll *old_port_dpll =
3328 			&old_crtc_state->icl_port_dplls[id];
3329 		struct icl_port_dpll *new_port_dpll =
3330 			&new_crtc_state->icl_port_dplls[id];
3331 
3332 		new_port_dpll->pll = NULL;
3333 
3334 		if (!old_port_dpll->pll)
3335 			continue;
3336 
3337 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3338 	}
3339 }
3340 
/*
 * Read back the MG PHY PLL state from hardware into @hw_state, masking each
 * register down to the fields the driver programs so it can be compared
 * against the computed state. Returns false if display power is off or the
 * PLL is not enabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The refclk-dependent masks must match the ones generated by
	 * icl_calc_mg_pll_state() so state comparison works.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3406 
/*
 * Read back the Dekel PHY PLL state from hardware into @hw_state, masking
 * each register down to the fields dkl_pll_write() programs. Returns false
 * if display power is off or the PLL is not enabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3480 
/*
 * Read back the combo/TBT PLL CFGCR registers into @hw_state, choosing the
 * register bank based on platform. Returns false if display power is off or
 * the PLL (per @enable_reg) is not enabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3535 
3536 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3537 				   struct intel_shared_dpll *pll,
3538 				   struct intel_dpll_hw_state *hw_state)
3539 {
3540 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3541 
3542 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3543 }
3544 
/* Read back the TBT PLL's state; it has a single fixed enable register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3551 
/*
 * Program the combo/TBT PLL CFGCR registers from the PLL's cached state,
 * selecting the per-platform register bank. The posting read flushes the
 * writes before the caller proceeds with the enable sequence.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* On JSL/EHL, DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3585 
/* Program the MG PHY PLL registers from the PLL's cached state. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are written whole, no RMW needed. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC use the refclk-dependent masks from calc/readout. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush the writes before the caller continues the enable sequence. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3636 
/*
 * Program the Dekel PHY PLL registers from the PLL's cached state. The
 * field masks here mirror the ones used in dkl_pll_get_hw_state().
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush the writes before the caller continues the enable sequence. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3706 
3707 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3708 				 struct intel_shared_dpll *pll,
3709 				 i915_reg_t enable_reg)
3710 {
3711 	u32 val;
3712 
3713 	val = intel_de_read(dev_priv, enable_reg);
3714 	val |= PLL_POWER_ENABLE;
3715 	intel_de_write(dev_priv, enable_reg, val);
3716 
3717 	/*
3718 	 * The spec says we need to "wait" but it also says it should be
3719 	 * immediate.
3720 	 */
3721 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3722 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3723 			pll->info->id);
3724 }
3725 
3726 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3727 			   struct intel_shared_dpll *pll,
3728 			   i915_reg_t enable_reg)
3729 {
3730 	u32 val;
3731 
3732 	val = intel_de_read(dev_priv, enable_reg);
3733 	val |= PLL_ENABLE;
3734 	intel_de_write(dev_priv, enable_reg, val);
3735 
3736 	/* Timeout is actually 600us. */
3737 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3738 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3739 }
3740 
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* WA applies only to ADL-P display stepping A0, and only via DPLL0. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3765 
/*
 * Combo PHY PLL enable sequence: power up, program CFGCR registers, enable
 * and wait for lock, then apply the ADL-P CMTG workaround. The order of
 * these steps must be preserved.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3799 
/*
 * TBT PLL enable sequence: power up, program CFGCR registers, then enable
 * and wait for lock. Uses the fixed TBT_PLL_ENABLE register.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3817 
3818 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3819 			  struct intel_shared_dpll *pll)
3820 {
3821 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3822 
3823 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3824 
3825 	if (DISPLAY_VER(dev_priv) >= 12)
3826 		dkl_pll_write(dev_priv, pll);
3827 	else
3828 		icl_mg_pll_write(dev_priv, pll);
3829 
3830 	/*
3831 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3832 	 * paths should already be setting the appropriate voltage, hence we do
3833 	 * nothing here.
3834 	 */
3835 
3836 	icl_pll_enable(dev_priv, pll, enable_reg);
3837 
3838 	/* DVFS post sequence would be here. See the comment above. */
3839 }
3840 
/*
 * Common PLL disable sequence: clear the enable bit and wait for lock to
 * drop, then clear the power-enable bit and wait for the power state to
 * deassert. The order of these steps must be preserved.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3877 
3878 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3879 			      struct intel_shared_dpll *pll)
3880 {
3881 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3882 
3883 	icl_pll_disable(dev_priv, pll, enable_reg);
3884 
3885 	if (IS_JSL_EHL(dev_priv) &&
3886 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3887 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3888 					pll->wakeref);
3889 }
3890 
/* Disable the TBT PLL via its fixed enable register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3896 
/* Disable an MG/DKL PHY PLL via its per-port enable register. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3904 
/* Update the DPLL reference clock bookkeeping from the CDCLK reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3910 
3911 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3912 			      const struct intel_dpll_hw_state *hw_state)
3913 {
3914 	drm_dbg_kms(&dev_priv->drm,
3915 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3916 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3917 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3918 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3919 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3920 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3921 		    hw_state->cfgcr0, hw_state->cfgcr1,
3922 		    hw_state->mg_refclkin_ctl,
3923 		    hw_state->mg_clktop2_coreclkctl1,
3924 		    hw_state->mg_clktop2_hsclkctl,
3925 		    hw_state->mg_pll_div0,
3926 		    hw_state->mg_pll_div1,
3927 		    hw_state->mg_pll_lf,
3928 		    hw_state->mg_pll_frac_lock,
3929 		    hw_state->mg_pll_ssc,
3930 		    hw_state->mg_pll_bias,
3931 		    hw_state->mg_pll_tdc_coldst_bias);
3932 }
3933 
/* Vfunc table for combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Vfunc table for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Vfunc table for MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3954 
/*
 * Icelake PLLs. Note intel_shared_dpll_init() warns if a table index
 * does not match the entry's DPLL id, so order matters here.
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3974 
/* Jasper Lake / Elkhart Lake PLLs: combo PHY only, no Type-C/TBT. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3989 
/*
 * Vfunc table for Dekel PHY (Type-C) PLLs; shares the MG enable/disable
 * paths but has its own hw state readout.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* Tiger Lake PLLs: combo + TBT + six Dekel Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4018 
/* Rocket Lake PLLs: combo PHY only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4033 
/* DG1 PLLs: four combo PHY PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4049 
/* Alder Lake S PLLs: combo PHY, mixing ICL and DG1 PLL ids. */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4065 
/* Alder Lake P PLLs: combo + TBT + four Dekel Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4085 
4086 /**
4087  * intel_shared_dpll_init - Initialize shared DPLLs
4088  * @dev: drm device
4089  *
4090  * Initialize shared DPLLs for @dev.
4091  */
4092 void intel_shared_dpll_init(struct drm_device *dev)
4093 {
4094 	struct drm_i915_private *dev_priv = to_i915(dev);
4095 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4096 	const struct dpll_info *dpll_info;
4097 	int i;
4098 
4099 	if (IS_DG2(dev_priv))
4100 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4101 		dpll_mgr = NULL;
4102 	else if (IS_ALDERLAKE_P(dev_priv))
4103 		dpll_mgr = &adlp_pll_mgr;
4104 	else if (IS_ALDERLAKE_S(dev_priv))
4105 		dpll_mgr = &adls_pll_mgr;
4106 	else if (IS_DG1(dev_priv))
4107 		dpll_mgr = &dg1_pll_mgr;
4108 	else if (IS_ROCKETLAKE(dev_priv))
4109 		dpll_mgr = &rkl_pll_mgr;
4110 	else if (DISPLAY_VER(dev_priv) >= 12)
4111 		dpll_mgr = &tgl_pll_mgr;
4112 	else if (IS_JSL_EHL(dev_priv))
4113 		dpll_mgr = &ehl_pll_mgr;
4114 	else if (DISPLAY_VER(dev_priv) >= 11)
4115 		dpll_mgr = &icl_pll_mgr;
4116 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4117 		dpll_mgr = &bxt_pll_mgr;
4118 	else if (DISPLAY_VER(dev_priv) == 9)
4119 		dpll_mgr = &skl_pll_mgr;
4120 	else if (HAS_DDI(dev_priv))
4121 		dpll_mgr = &hsw_pll_mgr;
4122 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4123 		dpll_mgr = &pch_pll_mgr;
4124 
4125 	if (!dpll_mgr) {
4126 		dev_priv->dpll.num_shared_dpll = 0;
4127 		return;
4128 	}
4129 
4130 	dpll_info = dpll_mgr->dpll_info;
4131 
4132 	for (i = 0; dpll_info[i].name; i++) {
4133 		drm_WARN_ON(dev, i != dpll_info[i].id);
4134 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4135 	}
4136 
4137 	dev_priv->dpll.mgr = dpll_mgr;
4138 	dev_priv->dpll.num_shared_dpll = i;
4139 	mutex_init(&dev_priv->dpll.lock);
4140 
4141 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4142 }
4143 
4144 /**
4145  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4146  * @state: atomic state
4147  * @crtc: CRTC to reserve DPLLs for
4148  * @encoder: encoder
4149  *
4150  * This function reserves all required DPLLs for the given CRTC and encoder
4151  * combination in the current atomic commit @state and the new @crtc atomic
4152  * state.
4153  *
4154  * The new configuration in the atomic commit @state is made effective by
4155  * calling intel_shared_dpll_swap_state().
4156  *
4157  * The reserved DPLLs should be released by calling
4158  * intel_release_shared_dplls().
4159  *
4160  * Returns:
4161  * True if all required DPLLs were successfully reserved.
4162  */
4163 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4164 				struct intel_crtc *crtc,
4165 				struct intel_encoder *encoder)
4166 {
4167 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4168 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4169 
4170 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4171 		return false;
4172 
4173 	return dpll_mgr->get_dplls(state, crtc, encoder);
4174 }
4175 
4176 /**
4177  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4178  * @state: atomic state
4179  * @crtc: crtc from which the DPLLs are to be released
4180  *
4181  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4182  * from the current atomic commit @state and the old @crtc atomic state.
4183  *
4184  * The new configuration in the atomic commit @state is made effective by
4185  * calling intel_shared_dpll_swap_state().
4186  */
4187 void intel_release_shared_dplls(struct intel_atomic_state *state,
4188 				struct intel_crtc *crtc)
4189 {
4190 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4191 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4192 
4193 	/*
4194 	 * FIXME: this function is called for every platform having a
4195 	 * compute_clock hook, even though the platform doesn't yet support
4196 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4197 	 * called on those.
4198 	 */
4199 	if (!dpll_mgr)
4200 		return;
4201 
4202 	dpll_mgr->put_dplls(state, crtc);
4203 }
4204 
4205 /**
4206  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4207  * @state: atomic state
4208  * @crtc: the CRTC for which to update the active DPLL
4209  * @encoder: encoder determining the type of port DPLL
4210  *
4211  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4212  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4213  * DPLL selected will be based on the current mode of the encoder's port.
4214  */
4215 void intel_update_active_dpll(struct intel_atomic_state *state,
4216 			      struct intel_crtc *crtc,
4217 			      struct intel_encoder *encoder)
4218 {
4219 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4220 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4221 
4222 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4223 		return;
4224 
4225 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4226 }
4227 
4228 /**
4229  * intel_dpll_get_freq - calculate the DPLL's output frequency
4230  * @i915: i915 device
4231  * @pll: DPLL for which to calculate the output frequency
4232  * @pll_state: DPLL state from which to calculate the output frequency
4233  *
4234  * Return the output frequency corresponding to @pll's passed in @pll_state.
4235  */
4236 int intel_dpll_get_freq(struct drm_i915_private *i915,
4237 			const struct intel_shared_dpll *pll,
4238 			const struct intel_dpll_hw_state *pll_state)
4239 {
4240 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4241 		return 0;
4242 
4243 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4244 }
4245 
4246 /**
4247  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4248  * @i915: i915 device
4249  * @pll: DPLL for which to calculate the output frequency
4250  * @hw_state: DPLL's hardware state
4251  *
4252  * Read out @pll's hardware state into @hw_state.
4253  */
4254 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4255 			     struct intel_shared_dpll *pll,
4256 			     struct intel_dpll_hw_state *hw_state)
4257 {
4258 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4259 }
4260 
/*
 * Read out @pll's current hardware state and rebuild the software
 * tracking (on/off and pipe usage) from the active CRTC states.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * Take the DC_OFF wakeref that an enabled JSL/EHL DPLL4 is expected
	 * to hold, so that combo_pll_disable() has a reference to put later.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Collect the pipes whose active state currently uses this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	/* At readout time every pipe referencing the PLL counts as active. */
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4288 
/* Refresh the DPLL reference clocks, if the platform's manager supports it. */
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
		i915->dpll.mgr->update_ref_clks(i915);
}
4294 
/* Read out the hardware state of every shared DPLL on the platform. */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}
4302 
/*
 * Bring a PLL's hardware state in line with software expectations after
 * readout: apply the ADL-P CMTG clock gating workaround to every enabled
 * PLL and turn off PLLs that are enabled but not driving any pipe.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Applied while the PLL is still on, before any disable below. */
	adlp_cmtg_clock_gating_wa(i915, pll);

	/* PLLs in use by at least one pipe are left enabled. */
	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4321 
/* Sanitize the state of every shared DPLL on the platform. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
}
4329 
4330 /**
4331  * intel_dpll_dump_hw_state - write hw_state to dmesg
4332  * @dev_priv: i915 drm device
4333  * @hw_state: hw state to be written to the log
4334  *
4335  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4336  */
4337 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4338 			      const struct intel_dpll_hw_state *hw_state)
4339 {
4340 	if (dev_priv->dpll.mgr) {
4341 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4342 	} else {
4343 		/* fallback for platforms that don't use the shared dpll
4344 		 * infrastructure
4345 		 */
4346 		drm_dbg_kms(&dev_priv->drm,
4347 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4348 			    "fp0: 0x%x, fp1: 0x%x\n",
4349 			    hw_state->dpll,
4350 			    hw_state->dpll_md,
4351 			    hw_state->fp0,
4352 			    hw_state->fp1);
4353 	}
4354 }
4355