1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "intel_de.h"
27 #include "intel_display_types.h"
28 #include "intel_dpio_phy.h"
29 #include "intel_dpll.h"
30 #include "intel_dpll_mgr.h"
31 #include "intel_pch_refclk.h"
32 #include "intel_tc.h"
33 #include "intel_tc_phy_regs.h"
34 
35 /**
36  * DOC: Display PLLs
37  *
38  * Display PLLs used for driving outputs vary by platform. While some have
39  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
40  * from a pool. In the latter scenario, it is possible that multiple pipes
41  * share a PLL if their configurations match.
42  *
43  * This file provides an abstraction over display PLLs. The function
44  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
45  * users of a PLL are tracked and that tracking is integrated with the atomic
46  * modset interface. During an atomic operation, required PLLs can be reserved
47  * for a given CRTC and encoder configuration by calling
48  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
49  * with intel_release_shared_dplls().
50  * Changes to the users are first staged in the atomic state, and then made
51  * effective by calling intel_shared_dpll_swap_state() during the atomic
52  * commit phase.
53  */
54 
/*
 * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs
 *
 * Each supported DPLL type provides one of these vtables; it is referenced
 * from the dpll_info tables below (e.g. ibx_pch_dpll_funcs).
 */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true when the pll is
	 * enabled in hardware.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
89 
/*
 * struct intel_dpll_mgr - per-platform DPLL management: the table of
 * available DPLLs plus the hooks used to reserve/release/track them.
 */
struct intel_dpll_mgr {
	/* table of this platform's DPLLs, terminated by a zeroed sentinel */
	const struct dpll_info *dpll_info;

	/* reserve the DPLLs needed by @crtc/@encoder; false if none found */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the DPLLs previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/*
	 * NOTE(review): presumably switches which of the reserved DPLLs is
	 * the active one for @crtc/@encoder — confirm against callers.
	 */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh i915->dpll.ref_clks (see e.g. hsw_update_dpll_ref_clks()) */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
105 
106 static void
107 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
108 				  struct intel_shared_dpll_state *shared_dpll)
109 {
110 	enum intel_dpll_id i;
111 
112 	/* Copy shared dpll state */
113 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
114 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
115 
116 		shared_dpll[i] = pll->state;
117 	}
118 }
119 
120 static struct intel_shared_dpll_state *
121 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
122 {
123 	struct intel_atomic_state *state = to_intel_atomic_state(s);
124 
125 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
126 
127 	if (!state->dpll_set) {
128 		state->dpll_set = true;
129 
130 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
131 						  state->shared_dpll);
132 	}
133 
134 	return state->shared_dpll;
135 }
136 
/**
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @dev_priv: i915 device instance
 * @id: pll id
 *
 * Returns:
 * A pointer to the DPLL with @id
 *
 * Note: @id is not range checked; the caller must pass a valid id.
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id)
{
	return &dev_priv->dpll.shared_dplls[id];
}
151 
152 /**
153  * intel_get_shared_dpll_id - get the id of a DPLL
154  * @dev_priv: i915 device instance
155  * @pll: the DPLL
156  *
157  * Returns:
158  * The id of @pll
159  */
160 enum intel_dpll_id
161 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
162 			 struct intel_shared_dpll *pll)
163 {
164 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
165 
166 	if (drm_WARN_ON(&dev_priv->drm,
167 			pll_idx < 0 ||
168 			pll_idx >= dev_priv->dpll.num_shared_dpll))
169 		return -1;
170 
171 	return pll_idx;
172 }
173 
/* For ILK+ */
/*
 * Warn unless @pll's hardware enable state matches the expected @state.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Asserting on a NULL pll is a caller bug; warn and bail. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Compare the expectation against live register state. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
192 
/* Map an ICL+ TC PLL id (MGPLL1..) to its Type-C port (TC_PORT_1..). */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
}

/* Inverse of icl_pll_id_to_tc_port(): Type-C port to its TC PLL id. */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
}
202 
203 static i915_reg_t
204 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
205 			   struct intel_shared_dpll *pll)
206 {
207 	if (IS_DG1(i915))
208 		return DG1_DPLL_ENABLE(pll->info->id);
209 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
210 		return MG_PLL_ENABLE(0);
211 
212 	return ICL_DPLL_ENABLE(pll->info->id);
213 }
214 
215 static i915_reg_t
216 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
217 			struct intel_shared_dpll *pll)
218 {
219 	const enum intel_dpll_id id = pll->info->id;
220 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
221 
222 	if (IS_ALDERLAKE_P(i915))
223 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
224 
225 	return MG_PLL_ENABLE(tc_port);
226 }
227 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by the CRTC of @crtc_state. Only the first
 * active user programs and turns on the hardware; later users just join
 * the reference tracking in pll->active_mask.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	/* dpll.lock protects active_mask and the on/off state. */
	mutex_lock(&dev_priv->dpll.lock);
	old_mask = pll->active_mask;

	/* The pipe must have reserved the pll and not yet enabled it. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
		goto out;

	pll->active_mask |= pipe_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	/* Another pipe already enabled the hardware; just sanity check it. */
	if (old_mask) {
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
273 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by the CRTC of @crtc_state. The hardware
 * is only turned off once the last pipe using the PLL has dropped its
 * reference from pll->active_mask.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int pipe_mask = BIT(crtc->pipe);

	/* PCH only available on ILK+ */
	if (DISPLAY_VER(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	/* dpll.lock protects active_mask and the on/off state. */
	mutex_lock(&dev_priv->dpll.lock);
	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id, crtc->base.name);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	/* Keep the hardware running while other pipes still use the PLL. */
	pll->active_mask &= ~pipe_mask;
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
319 
/*
 * Find a shared DPLL from @dpll_mask for @crtc: prefer a pll whose staged
 * hw state matches @pll_state exactly (so it can be shared), otherwise
 * fall back to the first pll with no users. Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* An exact hw state match means the pll can be shared. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
368 
369 static void
370 intel_reference_shared_dpll(struct intel_atomic_state *state,
371 			    const struct intel_crtc *crtc,
372 			    const struct intel_shared_dpll *pll,
373 			    const struct intel_dpll_hw_state *pll_state)
374 {
375 	struct drm_i915_private *i915 = to_i915(state->base.dev);
376 	struct intel_shared_dpll_state *shared_dpll;
377 	const enum intel_dpll_id id = pll->info->id;
378 
379 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
380 
381 	if (shared_dpll[id].pipe_mask == 0)
382 		shared_dpll[id].hw_state = *pll_state;
383 
384 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
385 		pipe_name(crtc->pipe));
386 
387 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
388 }
389 
390 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
391 					  const struct intel_crtc *crtc,
392 					  const struct intel_shared_dpll *pll)
393 {
394 	struct intel_shared_dpll_state *shared_dpll;
395 
396 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
397 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
398 }
399 
400 static void intel_put_dpll(struct intel_atomic_state *state,
401 			   struct intel_crtc *crtc)
402 {
403 	const struct intel_crtc_state *old_crtc_state =
404 		intel_atomic_get_old_crtc_state(state, crtc);
405 	struct intel_crtc_state *new_crtc_state =
406 		intel_atomic_get_new_crtc_state(state, crtc);
407 
408 	new_crtc_state->shared_dpll = NULL;
409 
410 	if (!old_crtc_state->shared_dpll)
411 		return;
412 
413 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
414 }
415 
416 /**
417  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
418  * @state: atomic state
419  *
420  * This is the dpll version of drm_atomic_helper_swap_state() since the
421  * helper does not handle driver-specific global state.
422  *
423  * For consistency with atomic helpers this function does a complete swap,
424  * i.e. it also puts the current state into @state, even though there is no
425  * need for that at this moment.
426  */
427 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
428 {
429 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
430 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
431 	enum intel_dpll_id i;
432 
433 	if (!state->dpll_set)
434 		return;
435 
436 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
437 		struct intel_shared_dpll *pll =
438 			&dev_priv->dpll.shared_dplls[i];
439 
440 		swap(pll->state, shared_dpll[i]);
441 	}
442 }
443 
444 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
445 				      struct intel_shared_dpll *pll,
446 				      struct intel_dpll_hw_state *hw_state)
447 {
448 	const enum intel_dpll_id id = pll->info->id;
449 	intel_wakeref_t wakeref;
450 	u32 val;
451 
452 	wakeref = intel_display_power_get_if_enabled(dev_priv,
453 						     POWER_DOMAIN_DISPLAY_CORE);
454 	if (!wakeref)
455 		return false;
456 
457 	val = intel_de_read(dev_priv, PCH_DPLL(id));
458 	hw_state->dpll = val;
459 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
460 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
461 
462 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
463 
464 	return val & DPLL_VCO_ENABLE;
465 }
466 
/* Warn unless the PCH reference clock is enabled (required by the DPLLs). */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* Only IBX/CPT PCHs have this refclk control. */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	/* Refclk counts as enabled if any of the source selects is set. */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
479 
/* Program and enable a PCH DPLL using the state staged in pll->state. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Dividers must be written before the DPLL control register. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
506 
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* NOTE(review): delay presumably required by the hw disable sequence */
	udelay(200);
}
516 
517 static bool ibx_get_dpll(struct intel_atomic_state *state,
518 			 struct intel_crtc *crtc,
519 			 struct intel_encoder *encoder)
520 {
521 	struct intel_crtc_state *crtc_state =
522 		intel_atomic_get_new_crtc_state(state, crtc);
523 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
524 	struct intel_shared_dpll *pll;
525 	enum intel_dpll_id i;
526 
527 	if (HAS_PCH_IBX(dev_priv)) {
528 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
529 		i = (enum intel_dpll_id) crtc->pipe;
530 		pll = &dev_priv->dpll.shared_dplls[i];
531 
532 		drm_dbg_kms(&dev_priv->drm,
533 			    "[CRTC:%d:%s] using pre-allocated %s\n",
534 			    crtc->base.base.id, crtc->base.name,
535 			    pll->info->name);
536 	} else {
537 		pll = intel_find_shared_dpll(state, crtc,
538 					     &crtc_state->dpll_hw_state,
539 					     BIT(DPLL_ID_PCH_PLL_B) |
540 					     BIT(DPLL_ID_PCH_PLL_A));
541 	}
542 
543 	if (!pll)
544 		return false;
545 
546 	/* reference the pll */
547 	intel_reference_shared_dpll(state, crtc,
548 				    pll, &crtc_state->dpll_hw_state);
549 
550 	crtc_state->shared_dpll = pll;
551 
552 	return true;
553 }
554 
/* Log the IBX dpll hw state fields for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
566 
/* Enable/disable/readout hooks for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs; the empty entry terminates the table. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH (IBX/CPT) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
585 
/* Enable a HSW WRPLL with the control value staged in pll->state. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* NOTE(review): settle delay after enabling — presumably per bspec */
	udelay(20);
}
595 
/* Enable the HSW SPLL with the control value staged in pll->state. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* NOTE(review): settle delay after enabling — presumably per bspec */
	udelay(20);
}
603 
/* Disable a HSW WRPLL; may also reconfigure the PCH refclk (see below). */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
621 
/* Disable the HSW SPLL; may also reconfigure the PCH refclk (see below). */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
639 
640 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
641 				       struct intel_shared_dpll *pll,
642 				       struct intel_dpll_hw_state *hw_state)
643 {
644 	const enum intel_dpll_id id = pll->info->id;
645 	intel_wakeref_t wakeref;
646 	u32 val;
647 
648 	wakeref = intel_display_power_get_if_enabled(dev_priv,
649 						     POWER_DOMAIN_DISPLAY_CORE);
650 	if (!wakeref)
651 		return false;
652 
653 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
654 	hw_state->wrpll = val;
655 
656 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
657 
658 	return val & WRPLL_PLL_ENABLE;
659 }
660 
661 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
662 				      struct intel_shared_dpll *pll,
663 				      struct intel_dpll_hw_state *hw_state)
664 {
665 	intel_wakeref_t wakeref;
666 	u32 val;
667 
668 	wakeref = intel_display_power_get_if_enabled(dev_priv,
669 						     POWER_DOMAIN_DISPLAY_CORE);
670 	if (!wakeref)
671 		return false;
672 
673 	val = intel_de_read(dev_priv, SPLL_CTL);
674 	hw_state->spll = val;
675 
676 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
677 
678 	return val & SPLL_PLL_ENABLE;
679 }
680 
/* LC PLL reference: 2700 MHz, and the same scaled by 2000 as a u64. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (P) search range; even values only. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate divider triple; p == 0 means "no candidate found yet". */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
697 
/*
 * Return the PPM budget used when picking WRPLL dividers for @clock (Hz).
 * Frequencies not listed in the table get the default budget of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{ 25175000, 0 }, { 25200000, 0 }, { 27000000, 0 },
		{ 27027000, 0 }, { 37762500, 0 }, { 37800000, 0 },
		{ 40500000, 0 }, { 40541000, 0 }, { 54000000, 0 },
		{ 54054000, 0 }, { 59341000, 0 }, { 59400000, 0 },
		{ 72000000, 0 }, { 74176000, 0 }, { 74250000, 0 },
		{ 81000000, 0 }, { 81081000, 0 }, { 89012000, 0 },
		{ 89100000, 0 }, { 108000000, 0 }, { 108108000, 0 },
		{ 111264000, 0 }, { 111375000, 0 }, { 148352000, 0 },
		{ 148500000, 0 }, { 162000000, 0 }, { 162162000, 0 },
		{ 222525000, 0 }, { 222750000, 0 }, { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
771 
/*
 * Consider divider candidate (r2, n2, p) — r2/n2 at twice their real
 * values — against the current *best for target frequency freq2k, and
 * replace *best when the candidate is preferable per the rules below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * All comparisons are cross-multiplied to avoid division:
	 * a/b hold budget * p * r2 terms, c/d the scaled frequency errors.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
831 
/*
 * Pick WRPLL dividers for the given clock. The returned r2 and n2 are
 * twice the real R and N so that half-steps can be represented.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Exhaustively score every (r2, n2, p) combination. */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
896 
897 static struct intel_shared_dpll *
898 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
899 		       struct intel_crtc *crtc)
900 {
901 	struct intel_crtc_state *crtc_state =
902 		intel_atomic_get_new_crtc_state(state, crtc);
903 	struct intel_shared_dpll *pll;
904 	u32 val;
905 	unsigned int p, n2, r2;
906 
907 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
908 
909 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
910 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
911 	      WRPLL_DIVIDER_POST(p);
912 
913 	crtc_state->dpll_hw_state.wrpll = val;
914 
915 	pll = intel_find_shared_dpll(state, crtc,
916 				     &crtc_state->dpll_hw_state,
917 				     BIT(DPLL_ID_WRPLL2) |
918 				     BIT(DPLL_ID_WRPLL1));
919 
920 	if (!pll)
921 		return NULL;
922 
923 	return pll;
924 }
925 
/*
 * Calculate a WRPLL's output frequency (in kHz) from the reference select
 * and divider fields of its control value.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
965 
966 static struct intel_shared_dpll *
967 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
968 {
969 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
970 	struct intel_shared_dpll *pll;
971 	enum intel_dpll_id pll_id;
972 	int clock = crtc_state->port_clock;
973 
974 	switch (clock / 2) {
975 	case 81000:
976 		pll_id = DPLL_ID_LCPLL_810;
977 		break;
978 	case 135000:
979 		pll_id = DPLL_ID_LCPLL_1350;
980 		break;
981 	case 270000:
982 		pll_id = DPLL_ID_LCPLL_2700;
983 		break;
984 	default:
985 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
986 			    clock);
987 		return NULL;
988 	}
989 
990 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
991 
992 	if (!pll)
993 		return NULL;
994 
995 	return pll;
996 }
997 
998 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
999 				  const struct intel_shared_dpll *pll,
1000 				  const struct intel_dpll_hw_state *pll_state)
1001 {
1002 	int link_clock = 0;
1003 
1004 	switch (pll->info->id) {
1005 	case DPLL_ID_LCPLL_810:
1006 		link_clock = 81000;
1007 		break;
1008 	case DPLL_ID_LCPLL_1350:
1009 		link_clock = 135000;
1010 		break;
1011 	case DPLL_ID_LCPLL_2700:
1012 		link_clock = 270000;
1013 		break;
1014 	default:
1015 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1016 		break;
1017 	}
1018 
1019 	return link_clock * 2;
1020 }
1021 
/* Reserve the SPLL for @crtc; used only for the fixed analog link rate. */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	/* SPLL only supports a single, fixed link rate. */
	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return NULL;

	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
					 SPLL_REF_MUXED_SSC;

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
1038 
1039 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1040 				 const struct intel_shared_dpll *pll,
1041 				 const struct intel_dpll_hw_state *pll_state)
1042 {
1043 	int link_clock = 0;
1044 
1045 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1046 	case SPLL_FREQ_810MHz:
1047 		link_clock = 81000;
1048 		break;
1049 	case SPLL_FREQ_1350MHz:
1050 		link_clock = 135000;
1051 		break;
1052 	case SPLL_FREQ_2700MHz:
1053 		link_clock = 270000;
1054 		break;
1055 	default:
1056 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1057 		break;
1058 	}
1059 
1060 	return link_clock * 2;
1061 }
1062 
1063 static bool hsw_get_dpll(struct intel_atomic_state *state,
1064 			 struct intel_crtc *crtc,
1065 			 struct intel_encoder *encoder)
1066 {
1067 	struct intel_crtc_state *crtc_state =
1068 		intel_atomic_get_new_crtc_state(state, crtc);
1069 	struct intel_shared_dpll *pll;
1070 
1071 	memset(&crtc_state->dpll_hw_state, 0,
1072 	       sizeof(crtc_state->dpll_hw_state));
1073 
1074 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1075 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1076 	else if (intel_crtc_has_dp_encoder(crtc_state))
1077 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1078 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1079 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1080 	else
1081 		return false;
1082 
1083 	if (!pll)
1084 		return false;
1085 
1086 	intel_reference_shared_dpll(state, crtc,
1087 				    pll, &crtc_state->dpll_hw_state);
1088 
1089 	crtc_state->shared_dpll = pll;
1090 
1091 	return true;
1092 }
1093 
1094 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1095 {
1096 	i915->dpll.ref_clks.ssc = 135000;
1097 	/* Non-SSC is only used on non-ULT HSW. */
1098 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1099 		i915->dpll.ref_clks.nssc = 24000;
1100 	else
1101 		i915->dpll.ref_clks.nssc = 135000;
1102 }
1103 
/* Dump the HSW PLL register values captured in @hw_state for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1110 
/* Hooks for the two programmable HSW WRPLLs (used for HDMI, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1117 
/* Hooks for the HSW SPLL (used for analog output, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1124 
/*
 * The LCPLLs run at fixed frequencies and are flagged INTEL_DPLL_ALWAYS_ON
 * in hsw_plls[], so enabling one is a no-op.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1129 
/* No-op: the always-on LCPLLs are never disabled from here. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1134 
/*
 * Report the always-on LCPLLs as enabled without reading back any
 * hardware state; @hw_state is left untouched.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1141 
/* Hooks for the fixed-frequency LCPLLs; enable/disable are no-ops. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1148 
/* The HSW PLL pool; the three LCPLLs are fixed-frequency and always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1158 
/* Platform PLL manager for HSW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1166 
/* Per-PLL register addresses on SKL: control and the two config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1170 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1197 
/*
 * Program this PLL's portion of the shared DPLL_CTRL1 register.
 *
 * Each PLL owns a 6-bit field in DPLL_CTRL1 (hence the id * 6 shift);
 * only this PLL's HDMI mode, SSC and link rate bits are modified.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1214 
/*
 * Enable a SKL DPLL (1-3): program its DPLL_CTRL1 field and the
 * CFGCR1/2 registers, set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1235 
/*
 * DPLL0 is always running (it drives CDCLK, see
 * skl_ddi_dpll0_get_hw_state()), so only its DPLL_CTRL1 field is
 * programmed here; the enable bit is left alone.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1241 
/* Disable a SKL DPLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1253 
/* No-op: DPLL0 keeps running since it drives CDCLK. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1258 
/*
 * Read back the hardware state of a SKL DPLL into @hw_state.
 * Returns false if display power is down or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's 6-bit field from the shared DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1295 
/*
 * Read back DPLL0's hardware state; only its ctrl1 field is captured
 * (DPLL0 has no HDMI mode / CFGCR registers, see skl_dpll_regs[]).
 * Returns false if display power is down.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1328 
/* Best candidate found so far by the divider search in skl_ddi_calculate_wrpll(). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6%  of the DCO central freq */
/* deviation limits in units of 0.01% (see skl_wrpll_try_divider()) */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1339 
1340 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1341 				  u64 central_freq,
1342 				  u64 dco_freq,
1343 				  unsigned int divider)
1344 {
1345 	u64 deviation;
1346 
1347 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1348 			      central_freq);
1349 
1350 	/* positive deviation */
1351 	if (dco_freq >= central_freq) {
1352 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1353 		    deviation < ctx->min_deviation) {
1354 			ctx->min_deviation = deviation;
1355 			ctx->central_freq = central_freq;
1356 			ctx->dco_freq = dco_freq;
1357 			ctx->p = divider;
1358 		}
1359 	/* negative deviation */
1360 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1361 		   deviation < ctx->min_deviation) {
1362 		ctx->min_deviation = deviation;
1363 		ctx->central_freq = central_freq;
1364 		ctx->dco_freq = dco_freq;
1365 		ctx->p = divider;
1366 	}
1367 }
1368 
/*
 * Split an overall divider @p into the three WRPLL post dividers
 * P0 (pdiv), P1 (qdiv) and P2 (kdiv). Only the divider values produced
 * by skl_ddi_calculate_wrpll() are handled; for any other p the output
 * parameters are left untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1417 
/*
 * WRPLL divider/DCO settings in register encoding; consumed by
 * skl_ddi_hdmi_pll_dividers() to build the CFGCR1/2 values.
 */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1427 
1428 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1429 				      u64 afe_clock,
1430 				      int ref_clock,
1431 				      u64 central_freq,
1432 				      u32 p0, u32 p1, u32 p2)
1433 {
1434 	u64 dco_freq;
1435 
1436 	switch (central_freq) {
1437 	case 9600000000ULL:
1438 		params->central_freq = 0;
1439 		break;
1440 	case 9000000000ULL:
1441 		params->central_freq = 1;
1442 		break;
1443 	case 8400000000ULL:
1444 		params->central_freq = 3;
1445 	}
1446 
1447 	switch (p0) {
1448 	case 1:
1449 		params->pdiv = 0;
1450 		break;
1451 	case 2:
1452 		params->pdiv = 1;
1453 		break;
1454 	case 3:
1455 		params->pdiv = 2;
1456 		break;
1457 	case 7:
1458 		params->pdiv = 4;
1459 		break;
1460 	default:
1461 		WARN(1, "Incorrect PDiv\n");
1462 	}
1463 
1464 	switch (p2) {
1465 	case 5:
1466 		params->kdiv = 0;
1467 		break;
1468 	case 2:
1469 		params->kdiv = 1;
1470 		break;
1471 	case 3:
1472 		params->kdiv = 2;
1473 		break;
1474 	case 1:
1475 		params->kdiv = 3;
1476 		break;
1477 	default:
1478 		WARN(1, "Incorrect KDiv\n");
1479 	}
1480 
1481 	params->qdiv_ratio = p1;
1482 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1483 
1484 	dco_freq = p0 * p1 * p2 * afe_clock;
1485 
1486 	/*
1487 	 * Intermediate values are in Hz.
1488 	 * Divide by MHz to match bsepc
1489 	 */
1490 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1491 	params->dco_fraction =
1492 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1493 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1494 }
1495 
/*
 * Find WRPLL dividers for @clock (in Hz): every allowed even divider is
 * tried first, then the odd ones, keeping the candidate whose DCO
 * frequency deviates least from one of the three central frequencies
 * (see skl_wrpll_try_divider()). Returns false if no divider lands
 * within the allowed deviation window.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1569 
1570 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1571 {
1572 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1573 	struct skl_wrpll_params wrpll_params = {};
1574 	u32 ctrl1, cfgcr1, cfgcr2;
1575 
1576 	/*
1577 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1578 	 * as the DPLL id in this function.
1579 	 */
1580 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1581 
1582 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1583 
1584 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1585 				     i915->dpll.ref_clks.nssc,
1586 				     &wrpll_params))
1587 		return false;
1588 
1589 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1590 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1591 		wrpll_params.dco_integer;
1592 
1593 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1594 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1595 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1596 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1597 		wrpll_params.central_freq;
1598 
1599 	memset(&crtc_state->dpll_hw_state, 0,
1600 	       sizeof(crtc_state->dpll_hw_state));
1601 
1602 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1603 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1604 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1605 	return true;
1606 }
1607 
/*
 * Compute the port clock a WRPLL produces from its saved CFGCR1/2
 * state, inverting the encodings written by skl_wrpll_params_populate().
 * Returns 0 on unknown divider encodings.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv only divides when qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	/* decode the masked pdiv field to its divider value */
	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	/* decode the masked kdiv field to its divider value */
	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/2^15) * ref clock */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1678 
/*
 * Build the ctrl1 value for a DP link rate derived from port_clock and
 * store it in crtc_state->dpll_hw_state.
 *
 * NOTE(review): an unhandled port_clock silently leaves only the
 * override bit set in ctrl1; no error is reported and true is still
 * returned.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1718 
1719 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1720 				  const struct intel_shared_dpll *pll,
1721 				  const struct intel_dpll_hw_state *pll_state)
1722 {
1723 	int link_clock = 0;
1724 
1725 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1726 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1727 	case DPLL_CTRL1_LINK_RATE_810:
1728 		link_clock = 81000;
1729 		break;
1730 	case DPLL_CTRL1_LINK_RATE_1080:
1731 		link_clock = 108000;
1732 		break;
1733 	case DPLL_CTRL1_LINK_RATE_1350:
1734 		link_clock = 135000;
1735 		break;
1736 	case DPLL_CTRL1_LINK_RATE_1620:
1737 		link_clock = 162000;
1738 		break;
1739 	case DPLL_CTRL1_LINK_RATE_2160:
1740 		link_clock = 216000;
1741 		break;
1742 	case DPLL_CTRL1_LINK_RATE_2700:
1743 		link_clock = 270000;
1744 		break;
1745 	default:
1746 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1747 		break;
1748 	}
1749 
1750 	return link_clock * 2;
1751 }
1752 
1753 static bool skl_get_dpll(struct intel_atomic_state *state,
1754 			 struct intel_crtc *crtc,
1755 			 struct intel_encoder *encoder)
1756 {
1757 	struct intel_crtc_state *crtc_state =
1758 		intel_atomic_get_new_crtc_state(state, crtc);
1759 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1760 	struct intel_shared_dpll *pll;
1761 	bool bret;
1762 
1763 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1764 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1765 		if (!bret) {
1766 			drm_dbg_kms(&i915->drm,
1767 				    "Could not get HDMI pll dividers.\n");
1768 			return false;
1769 		}
1770 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1771 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1772 		if (!bret) {
1773 			drm_dbg_kms(&i915->drm,
1774 				    "Could not set DP dpll HW state.\n");
1775 			return false;
1776 		}
1777 	} else {
1778 		return false;
1779 	}
1780 
1781 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1782 		pll = intel_find_shared_dpll(state, crtc,
1783 					     &crtc_state->dpll_hw_state,
1784 					     BIT(DPLL_ID_SKL_DPLL0));
1785 	else
1786 		pll = intel_find_shared_dpll(state, crtc,
1787 					     &crtc_state->dpll_hw_state,
1788 					     BIT(DPLL_ID_SKL_DPLL3) |
1789 					     BIT(DPLL_ID_SKL_DPLL2) |
1790 					     BIT(DPLL_ID_SKL_DPLL1));
1791 	if (!pll)
1792 		return false;
1793 
1794 	intel_reference_shared_dpll(state, crtc,
1795 				    pll, &crtc_state->dpll_hw_state);
1796 
1797 	crtc_state->shared_dpll = pll;
1798 
1799 	return true;
1800 }
1801 
1802 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1803 				const struct intel_shared_dpll *pll,
1804 				const struct intel_dpll_hw_state *pll_state)
1805 {
1806 	/*
1807 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1808 	 * the internal shift for each field
1809 	 */
1810 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1811 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1812 	else
1813 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1814 }
1815 
/* Record the SKL PLL reference clock, taken from the CDCLK reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1821 
/* Dump the SKL PLL register values captured in @hw_state for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1831 
/* Hooks for SKL DPLL 1-3 (fully software controlled). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1838 
/* Hooks for DPLL0, which stays enabled; only its ctrl1 field is programmed. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1845 
/* The SKL PLL pool; DPLL0 is always on (it drives CDCLK). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1853 
/* Platform PLL manager for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1861 
/*
 * Enable a BXT/GLK port PLL: program the PHY PLL dividers and
 * coefficients from the cached hw state, trigger recalibration, set the
 * enable bit and wait for lock, then program the lane staggering
 * configuration. The register write order follows the hardware enable
 * sequence and must not be changed.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK requires powering the PLL up before programming it. */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_INT_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1982 
/* Disable a BXT/GLK port PLL; on GLK, additionally power it down. */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2005 
/*
 * Read back a BXT/GLK port PLL's state into @hw_state, masking each
 * register down to the bits that bxt_ddi_pll_enable() programs.
 * Returns false if display power is down or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2085 
/*
 * Pre-calculated divider values for the DP link rates; .dot is the link
 * clock in kHz and is matched against crtc_state->port_clock in
 * bxt_ddi_dp_pll_dividers().
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2097 
2098 static bool
2099 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2100 			  struct dpll *clk_div)
2101 {
2102 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2103 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2104 
2105 	/* Calculate HDMI div */
2106 	/*
2107 	 * FIXME: tie the following calculation into
2108 	 * i9xx_crtc_compute_clock
2109 	 */
2110 	if (!bxt_find_best_dpll(crtc_state, clk_div)) {
2111 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2112 			crtc_state->port_clock,
2113 			pipe_name(crtc->pipe));
2114 		return false;
2115 	}
2116 
2117 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2118 
2119 	return true;
2120 }
2121 
2122 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2123 				    struct dpll *clk_div)
2124 {
2125 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2126 	int i;
2127 
2128 	*clk_div = bxt_dp_clk_val[0];
2129 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2130 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2131 			*clk_div = bxt_dp_clk_val[i];
2132 			break;
2133 		}
2134 	}
2135 
2136 	chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div);
2137 
2138 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2139 		    clk_div->dot != crtc_state->port_clock);
2140 }
2141 
/*
 * Translate the computed clock dividers into the BXT port PLL register
 * values stored in crtc_state->dpll_hw_state, including loop filter
 * coefficients and lane stagger configuration.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Loop filter coefficients are selected by VCO frequency range (kHz) */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		/* also hit when no dividers were found (vco == 0) */
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger setting scales with the port clock (kHz) */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* m2 is .22 fixed point: integer part in pll0, fraction in pll2 */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2211 
2212 static bool
2213 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2214 {
2215 	struct dpll clk_div = {};
2216 
2217 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2218 
2219 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2220 }
2221 
2222 static bool
2223 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2224 {
2225 	struct dpll clk_div = {};
2226 
2227 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2228 
2229 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2230 }
2231 
/*
 * Reconstruct the PLL output frequency (in kHz) from the hw state by
 * decoding the divider values programmed by bxt_ddi_set_dpll_hw_state().
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	/* m1 is always programmed as 2 on these PLLs */
	clock.m1 = 2;
	/* m2 is .22 fixed point: integer part in pll0, fraction in pll2 */
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);

	/* BXT uses the CHV divider layout for the frequency calculation */
	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2248 
/*
 * Compute the PLL state for the CRTC's output type and reserve the port's
 * dedicated PLL. On BXT/GLK each DDI port has its own PLL, so there is no
 * PLL sharing/selection logic here.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* Fill crtc_state->dpll_hw_state for the active output type */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	/* Track this CRTC as a user of the PLL in the atomic state */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2281 
/* BXT/GLK port PLL reference is 100 MHz (in kHz) for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2288 
2289 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2290 			      const struct intel_dpll_hw_state *hw_state)
2291 {
2292 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2293 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2294 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2295 		    hw_state->ebb0,
2296 		    hw_state->ebb4,
2297 		    hw_state->pll0,
2298 		    hw_state->pll1,
2299 		    hw_state->pll2,
2300 		    hw_state->pll3,
2301 		    hw_state->pll6,
2302 		    hw_state->pll8,
2303 		    hw_state->pll9,
2304 		    hw_state->pll10,
2305 		    hw_state->pcsdw12);
2306 }
2307 
/* Hook table shared by all BXT/GLK per-port PLLs */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2314 
/* One PLL per DDI port; bxt_get_dpll() maps ports to these ids 1:1 */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2321 
/* PLL manager entry points for BXT/GLK */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2329 
/*
 * Split a WRPLL post divider into its pdiv/qdiv/kdiv factors such that
 * bestdiv == pdiv * qdiv * kdiv. Only dividers from the table in
 * icl_calc_wrpll() are decomposed; other values leave the outputs untouched.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3/5/7 directly, 9/15/21 as 3 * (3/5/7) */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2368 
/*
 * Convert the chosen DCO frequency, reference clock and pdiv/qdiv/kdiv
 * dividers into the register field encodings stored in *params.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* kdiv field encoding: divide-by-1/2/3 is programmed as 1/2/4 */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* pdiv field encoding: divide-by-2/3/5/7 is programmed as 1/2/4/8 */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv other than 1 is only valid together with kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO divider as a fixed point value with 15 fractional bits */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2416 
2417 /*
2418  * Display WA #22010492432: ehl, tgl, adl-p
2419  * Program half of the nominal DCO divider fraction value.
2420  */
2421 static bool
2422 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2423 {
2424 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2425 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2426 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
2427 		 i915->dpll.ref_clks.nssc == 38400;
2428 }
2429 
/* Table entry mapping a port clock to its pre-computed WRPLL parameters */
struct icl_combo_pll_params {
	int clock;			/* port clock this entry matches (kHz) */
	struct skl_wrpll_params wrpll;	/* register-ready divider values */
};
2434 
2435 /*
2436  * These values alrea already adjusted: they're the bits we write to the
2437  * registers, not the logical values.
2438  */
2439 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2440 	{ 540000,
2441 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2442 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2443 	{ 270000,
2444 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2445 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2446 	{ 162000,
2447 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2448 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2449 	{ 324000,
2450 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2451 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2452 	{ 216000,
2453 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2454 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2455 	{ 432000,
2456 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2457 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2458 	{ 648000,
2459 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2460 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2461 	{ 810000,
2462 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2463 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2464 };
2465 
2466 
/* Also used for 38.4 MHz values (see icl_calc_dp_combo_pll()). */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2494 
/* Fixed TBT PLL parameters for ICL, 24 MHz reference */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Fixed TBT PLL parameters for ICL, 19.2 (and 38.4) MHz reference */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Fixed TBT PLL parameters for display version 12+, 19.2/38.4 MHz reference */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* Fixed TBT PLL parameters for display version 12+, 24 MHz reference */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2515 
2516 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2517 				  struct skl_wrpll_params *pll_params)
2518 {
2519 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2520 	const struct icl_combo_pll_params *params =
2521 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2522 		icl_dp_combo_pll_24MHz_values :
2523 		icl_dp_combo_pll_19_2MHz_values;
2524 	int clock = crtc_state->port_clock;
2525 	int i;
2526 
2527 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2528 		if (clock == params[i].clock) {
2529 			*pll_params = params[i].wrpll;
2530 			return true;
2531 		}
2532 	}
2533 
2534 	MISSING_CASE(clock);
2535 	return false;
2536 }
2537 
2538 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2539 			     struct skl_wrpll_params *pll_params)
2540 {
2541 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2542 
2543 	if (DISPLAY_VER(dev_priv) >= 12) {
2544 		switch (dev_priv->dpll.ref_clks.nssc) {
2545 		default:
2546 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2547 			fallthrough;
2548 		case 19200:
2549 		case 38400:
2550 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2551 			break;
2552 		case 24000:
2553 			*pll_params = tgl_tbt_pll_24MHz_values;
2554 			break;
2555 		}
2556 	} else {
2557 		switch (dev_priv->dpll.ref_clks.nssc) {
2558 		default:
2559 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2560 			fallthrough;
2561 		case 19200:
2562 		case 38400:
2563 			*pll_params = icl_tbt_pll_19_2MHz_values;
2564 			break;
2565 		case 24000:
2566 			*pll_params = icl_tbt_pll_24MHz_values;
2567 			break;
2568 		}
2569 	}
2570 
2571 	return true;
2572 }
2573 
/* The TBT PLL has no single output frequency to report; always returns 0. */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2586 
2587 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2588 {
2589 	int ref_clock = i915->dpll.ref_clks.nssc;
2590 
2591 	/*
2592 	 * For ICL+, the spec states: if reference frequency is 38.4,
2593 	 * use 19.2 because the DPLL automatically divides that by 2.
2594 	 */
2595 	if (ref_clock == 38400)
2596 		ref_clock = 19200;
2597 
2598 	return ref_clock;
2599 }
2600 
2601 static bool
2602 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2603 	       struct skl_wrpll_params *wrpll_params)
2604 {
2605 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2606 	int ref_clock = icl_wrpll_ref_clock(i915);
2607 	u32 afe_clock = crtc_state->port_clock * 5;
2608 	u32 dco_min = 7998000;
2609 	u32 dco_max = 10000000;
2610 	u32 dco_mid = (dco_min + dco_max) / 2;
2611 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2612 					 18, 20, 24, 28, 30, 32,  36,  40,
2613 					 42, 44, 48, 50, 52, 54,  56,  60,
2614 					 64, 66, 68, 70, 72, 76,  78,  80,
2615 					 84, 88, 90, 92, 96, 98, 100, 102,
2616 					  3,  5,  7,  9, 15, 21 };
2617 	u32 dco, best_dco = 0, dco_centrality = 0;
2618 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2619 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2620 
2621 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2622 		dco = afe_clock * dividers[d];
2623 
2624 		if (dco <= dco_max && dco >= dco_min) {
2625 			dco_centrality = abs(dco - dco_mid);
2626 
2627 			if (dco_centrality < best_dco_centrality) {
2628 				best_dco_centrality = dco_centrality;
2629 				best_div = dividers[d];
2630 				best_dco = dco;
2631 			}
2632 		}
2633 	}
2634 
2635 	if (best_div == 0)
2636 		return false;
2637 
2638 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2639 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2640 				  pdiv, qdiv, kdiv);
2641 
2642 	return true;
2643 }
2644 
/* Calculate the combo PHY PLL port clock (in kHz) from its hw state. */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv only divides when qdiv_mode is set; otherwise it's 1 */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* decode the pdiv register field into the actual divider value */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* decode the kdiv register field into the actual divider value */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo Display WA #22010492432 (half the fraction was programmed) */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* the fraction is in 1/0x8000 (15 fractional bits) of the refclk */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* unrecognized register encodings decode to 0 above; don't divide by 0 */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* DCO / dividers is the AFE clock; port clock is AFE clock / 5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2705 
/* Pack the computed WRPLL parameters into the DPLL_CFGCR0/CFGCR1 hw state. */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	/* Display WA #22010492432: program half the nominal DCO fraction */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* optional VBT override for the AFC startup value */
	if (i915->vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
}
2733 
/*
 * Find a target DCO frequency and the div1 (hsdiv) / div2 (dsdiv) post
 * dividers for an MG/DKL PHY PLL such that div1 * div2 * clock * 5 lands
 * inside the valid DCO range, filling in the clktop2 register fields.
 * Returns false if no combination fits.
 */
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				     u32 *target_dco_khz,
				     struct intel_dpll_hw_state *state,
				     bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP requires an exact 8.1 GHz DCO; HDMI allows a range */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* translate div1 into its register field encoding */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return true;
		}
	}

	return false;
}
2809 
2810 /*
2811  * The specification for this function uses real numbers, so the math had to be
2812  * adapted to integer-only calculation, that's why it looks so different.
2813  */
2814 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2815 				  struct intel_dpll_hw_state *pll_state)
2816 {
2817 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2818 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
2819 	int clock = crtc_state->port_clock;
2820 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2821 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2822 	u32 prop_coeff, int_coeff;
2823 	u32 tdc_targetcnt, feedfwgain;
2824 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2825 	u64 tmp;
2826 	bool use_ssc = false;
2827 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2828 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2829 
2830 	memset(pll_state, 0, sizeof(*pll_state));
2831 
2832 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2833 				      pll_state, is_dkl)) {
2834 		drm_dbg_kms(&dev_priv->drm,
2835 			    "Failed to find divisors for clock %d\n", clock);
2836 		return false;
2837 	}
2838 
2839 	m1div = 2;
2840 	m2div_int = dco_khz / (refclk_khz * m1div);
2841 	if (m2div_int > 255) {
2842 		if (!is_dkl) {
2843 			m1div = 4;
2844 			m2div_int = dco_khz / (refclk_khz * m1div);
2845 		}
2846 
2847 		if (m2div_int > 255) {
2848 			drm_dbg_kms(&dev_priv->drm,
2849 				    "Failed to find mdiv for clock %d\n",
2850 				    clock);
2851 			return false;
2852 		}
2853 	}
2854 	m2div_rem = dco_khz % (refclk_khz * m1div);
2855 
2856 	tmp = (u64)m2div_rem * (1 << 22);
2857 	do_div(tmp, refclk_khz * m1div);
2858 	m2div_frac = tmp;
2859 
2860 	switch (refclk_khz) {
2861 	case 19200:
2862 		iref_ndiv = 1;
2863 		iref_trim = 28;
2864 		iref_pulse_w = 1;
2865 		break;
2866 	case 24000:
2867 		iref_ndiv = 1;
2868 		iref_trim = 25;
2869 		iref_pulse_w = 2;
2870 		break;
2871 	case 38400:
2872 		iref_ndiv = 2;
2873 		iref_trim = 28;
2874 		iref_pulse_w = 1;
2875 		break;
2876 	default:
2877 		MISSING_CASE(refclk_khz);
2878 		return false;
2879 	}
2880 
2881 	/*
2882 	 * tdc_res = 0.000003
2883 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2884 	 *
2885 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2886 	 * was supposed to be a division, but we rearranged the operations of
2887 	 * the formula to avoid early divisions so we don't multiply the
2888 	 * rounding errors.
2889 	 *
2890 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2891 	 * we also rearrange to work with integers.
2892 	 *
2893 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2894 	 * last division by 10.
2895 	 */
2896 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2897 
2898 	/*
2899 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2900 	 * 32 bits. That's not a problem since we round the division down
2901 	 * anyway.
2902 	 */
2903 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2904 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2905 
2906 	if (dco_khz >= 9000000) {
2907 		prop_coeff = 5;
2908 		int_coeff = 10;
2909 	} else {
2910 		prop_coeff = 4;
2911 		int_coeff = 8;
2912 	}
2913 
2914 	if (use_ssc) {
2915 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2916 		do_div(tmp, refclk_khz * m1div * 10000);
2917 		ssc_stepsize = tmp;
2918 
2919 		tmp = mul_u32_u32(dco_khz, 1000);
2920 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2921 	} else {
2922 		ssc_stepsize = 0;
2923 		ssc_steplen = 0;
2924 	}
2925 	ssc_steplog = 4;
2926 
2927 	/* write pll_state calculations */
2928 	if (is_dkl) {
2929 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2930 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2931 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2932 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2933 		if (dev_priv->vbt.override_afc_startup) {
2934 			u8 val = dev_priv->vbt.override_afc_startup_val;
2935 
2936 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2937 		}
2938 
2939 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2940 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2941 
2942 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2943 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2944 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2945 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2946 
2947 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2948 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2949 
2950 		pll_state->mg_pll_tdc_coldst_bias =
2951 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2952 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2953 
2954 	} else {
2955 		pll_state->mg_pll_div0 =
2956 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2957 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2958 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2959 
2960 		pll_state->mg_pll_div1 =
2961 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2962 			MG_PLL_DIV1_DITHER_DIV_2 |
2963 			MG_PLL_DIV1_NDIVRATIO(1) |
2964 			MG_PLL_DIV1_FBPREDIV(m1div);
2965 
2966 		pll_state->mg_pll_lf =
2967 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2968 			MG_PLL_LF_AFCCNTSEL_512 |
2969 			MG_PLL_LF_GAINCTRL(1) |
2970 			MG_PLL_LF_INT_COEFF(int_coeff) |
2971 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2972 
2973 		pll_state->mg_pll_frac_lock =
2974 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2975 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2976 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2977 			MG_PLL_FRAC_LOCK_DCODITHEREN |
2978 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2979 		if (use_ssc || m2div_rem > 0)
2980 			pll_state->mg_pll_frac_lock |=
2981 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2982 
2983 		pll_state->mg_pll_ssc =
2984 			(use_ssc ? MG_PLL_SSC_EN : 0) |
2985 			MG_PLL_SSC_TYPE(2) |
2986 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2987 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
2988 			MG_PLL_SSC_FLLEN |
2989 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2990 
2991 		pll_state->mg_pll_tdc_coldst_bias =
2992 			MG_PLL_TDC_COLDST_COLDSTART |
2993 			MG_PLL_TDC_COLDST_IREFINT_EN |
2994 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2995 			MG_PLL_TDC_TDCOVCCORR_EN |
2996 			MG_PLL_TDC_TDCSEL(3);
2997 
2998 		pll_state->mg_pll_bias =
2999 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3000 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3001 			MG_PLL_BIAS_BIAS_BONUS(10) |
3002 			MG_PLL_BIAS_BIASCAL_EN |
3003 			MG_PLL_BIAS_CTRIM(12) |
3004 			MG_PLL_BIAS_VREF_RDAC(4) |
3005 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3006 
3007 		if (refclk_khz == 38400) {
3008 			pll_state->mg_pll_tdc_coldst_bias_mask =
3009 				MG_PLL_TDC_COLDST_COLDSTART;
3010 			pll_state->mg_pll_bias_mask = 0;
3011 		} else {
3012 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3013 			pll_state->mg_pll_bias_mask = -1U;
3014 		}
3015 
3016 		pll_state->mg_pll_tdc_coldst_bias &=
3017 			pll_state->mg_pll_tdc_coldst_bias_mask;
3018 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3019 	}
3020 
3021 	return true;
3022 }
3023 
/*
 * Compute the output frequency (in kHz) of an MG (ICL) or Dekel (display 12+)
 * TypeC PHY PLL from its saved register state.
 *
 * The frequency formula implemented below is:
 *   freq = ref * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2)
 * Returns 0 if the HSDIV ratio field holds an unexpected value.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* Dekel and MG PHYs lay out the feedback dividers differently. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* Fractional feedback divider only contributes when enabled. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		/* Fractional feedback divider only contributes when enabled. */
		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the HSDIV ratio field into the actual divider value. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3095 
3096 /**
3097  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3098  * @crtc_state: state for the CRTC to select the DPLL for
3099  * @port_dpll_id: the active @port_dpll_id to select
3100  *
3101  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3102  * CRTC.
3103  */
3104 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3105 			      enum icl_port_dpll_id port_dpll_id)
3106 {
3107 	struct icl_port_dpll *port_dpll =
3108 		&crtc_state->icl_port_dplls[port_dpll_id];
3109 
3110 	crtc_state->shared_dpll = port_dpll->pll;
3111 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3112 }
3113 
3114 static void icl_update_active_dpll(struct intel_atomic_state *state,
3115 				   struct intel_crtc *crtc,
3116 				   struct intel_encoder *encoder)
3117 {
3118 	struct intel_crtc_state *crtc_state =
3119 		intel_atomic_get_new_crtc_state(state, crtc);
3120 	struct intel_digital_port *primary_port;
3121 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3122 
3123 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3124 		enc_to_mst(encoder)->primary :
3125 		enc_to_dig_port(encoder);
3126 
3127 	if (primary_port &&
3128 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3129 	     intel_tc_port_in_legacy_mode(primary_port)))
3130 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3131 
3132 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3133 }
3134 
3135 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3136 {
3137 	if (!(i915->hti_state & HDPORT_ENABLED))
3138 		return 0;
3139 
3140 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3141 }
3142 
/*
 * Compute the DPLL state for a combo PHY port and reserve a matching shared
 * DPLL from the platform-specific pool. On success the reserved PLL is also
 * made the active one for the CRTC. Returns false if no PLL state could be
 * computed or no free PLL was found.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need a WRPLL-style config, DP uses fixed link rates. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* Build the per-platform pool of candidate DPLLs for this port. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* DG1 splits its four DPLLs between two pairs of ports. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		/* On JSL/EHL, DPLL4 is not usable by port A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3222 
/*
 * Reserve both DPLLs a TypeC port may need: the TBT PLL (default) and the
 * per-port MG PHY PLL. Both are referenced so a later mode switch between
 * TBT and DP-alt/legacy doesn't require recomputation; the active one is
 * selected at the end. On MG PLL failure the already-taken TBT reference
 * is dropped again.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First reserve the TBT PLL as the default instance. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then reserve the MG PHY PLL dedicated to this TypeC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Undo the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3283 
3284 static bool icl_get_dplls(struct intel_atomic_state *state,
3285 			  struct intel_crtc *crtc,
3286 			  struct intel_encoder *encoder)
3287 {
3288 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3289 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3290 
3291 	if (intel_phy_is_combo(dev_priv, phy))
3292 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3293 	else if (intel_phy_is_tc(dev_priv, phy))
3294 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3295 
3296 	MISSING_CASE(phy);
3297 
3298 	return false;
3299 }
3300 
3301 static void icl_put_dplls(struct intel_atomic_state *state,
3302 			  struct intel_crtc *crtc)
3303 {
3304 	const struct intel_crtc_state *old_crtc_state =
3305 		intel_atomic_get_old_crtc_state(state, crtc);
3306 	struct intel_crtc_state *new_crtc_state =
3307 		intel_atomic_get_new_crtc_state(state, crtc);
3308 	enum icl_port_dpll_id id;
3309 
3310 	new_crtc_state->shared_dpll = NULL;
3311 
3312 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3313 		const struct icl_port_dpll *old_port_dpll =
3314 			&old_crtc_state->icl_port_dplls[id];
3315 		struct icl_port_dpll *new_port_dpll =
3316 			&new_crtc_state->icl_port_dplls[id];
3317 
3318 		new_port_dpll->pll = NULL;
3319 
3320 		if (!old_port_dpll->pll)
3321 			continue;
3322 
3323 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3324 	}
3325 }
3326 
/*
 * Read back the MG PHY PLL state from the hardware. Each register value is
 * masked down to the fields the driver programs, so a later state check can
 * compare against the computed state while ignoring reserved bits. Returns
 * false if display power is off or the PLL is not enabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The comparison masks for these two registers depend on the refclk,
	 * mirroring what icl_calc_mg_pll_state() does on the calc side.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3392 
/*
 * Read back the Dekel (display 12+ TypeC) PHY PLL state from the hardware.
 * Each value is masked down to the fields the driver programs so later state
 * checks ignore reserved bits. Returns false if display power is off or the
 * PLL is not enabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	/* The AFC startup field is only compared when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3466 
/*
 * Common readout for combo/TBT PLLs: read the platform-specific CFGCR0/1
 * (and, on display 12+ with a VBT AFC override, the DIV0 AFC field) into
 * @hw_state. Returns false if display power is off or the PLL is not
 * enabled via @enable_reg.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* Pick the CFGCR register variants matching the platform. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* div0 only participates in state checks with a VBT override. */
		if (dev_priv->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* EHL/JSL DPLL4 uses the register instance at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3525 
3526 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3527 				   struct intel_shared_dpll *pll,
3528 				   struct intel_dpll_hw_state *hw_state)
3529 {
3530 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3531 
3532 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3533 }
3534 
/* TBT PLL readout: common ICL path with the fixed TBT enable register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3541 
/*
 * Program the combo/TBT PLL configuration (CFGCR0/1 and, where applicable,
 * the DIV0 AFC startup field) from the PLL's committed state, using the
 * register variants for the current platform.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		/* DIV0 is only programmed on display 12+, see below. */
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* EHL/JSL DPLL4 uses the register instance at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* A VBT AFC override without a DIV0 register would be a driver bug. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
			     hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3582 
/*
 * Program all MG PHY PLL registers for the PLL's TypeC port from the
 * committed state, finishing with a posting read to flush the writes.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Bias/TDC masks are refclk dependent, produced at calc/readout time. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3633 
/*
 * Program all Dekel PHY PLL registers for the PLL's TypeC port from the
 * committed state. All registers are updated read-modify-write to preserve
 * fields the driver does not own; a posting read flushes the sequence.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only touched when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
		     hw_state->mg_pll_div0);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3701 
3702 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3703 				 struct intel_shared_dpll *pll,
3704 				 i915_reg_t enable_reg)
3705 {
3706 	u32 val;
3707 
3708 	val = intel_de_read(dev_priv, enable_reg);
3709 	val |= PLL_POWER_ENABLE;
3710 	intel_de_write(dev_priv, enable_reg, val);
3711 
3712 	/*
3713 	 * The spec says we need to "wait" but it also says it should be
3714 	 * immediate.
3715 	 */
3716 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3717 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3718 			pll->info->id);
3719 }
3720 
3721 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3722 			   struct intel_shared_dpll *pll,
3723 			   i915_reg_t enable_reg)
3724 {
3725 	u32 val;
3726 
3727 	val = intel_de_read(dev_priv, enable_reg);
3728 	val |= PLL_ENABLE;
3729 	intel_de_write(dev_priv, enable_reg, val);
3730 
3731 	/* Timeout is actually 600us. */
3732 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3733 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3734 }
3735 
/* Wa_16011069516: disable CMTG clock gating on ADL-P A0 when enabling DPLL0. */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only applies to ADL-P steppings A0..B0 and only to DPLL0. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3760 
/*
 * Full enable sequence for a combo PHY PLL: power up, program the config
 * registers, then enable and wait for lock. On EHL/JSL a DC-off power
 * reference is held while DPLL4 is enabled; it is released again in
 * combo_pll_disable().
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3794 
/*
 * Full enable sequence for the TBT PLL: power up, program the config
 * registers, then enable and wait for lock.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3812 
/*
 * Full enable sequence for a TypeC PHY PLL: power up, program the MG
 * (ICL) or Dekel (display 12+) register set, then enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3835 
/*
 * Common disable sequence: clear the enable bit and wait for the PLL to
 * unlock, then power the PLL down and wait for the power state to clear.
 * Failures are logged but not propagated.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3872 
3873 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3874 			      struct intel_shared_dpll *pll)
3875 {
3876 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3877 
3878 	icl_pll_disable(dev_priv, pll, enable_reg);
3879 
3880 	if (IS_JSL_EHL(dev_priv) &&
3881 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3882 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3883 					pll->wakeref);
3884 }
3885 
/* Disable the TBT PLL through the common ICL disable sequence. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3891 
/* Disable a TypeC PHY PLL through the common ICL disable sequence. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, intel_tc_pll_enable_reg(dev_priv, pll));
}
3899 
/* Cache the DPLL reference clock: ICL+ uses the CDCLK refclk, non-SSC only. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3905 
3906 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3907 			      const struct intel_dpll_hw_state *hw_state)
3908 {
3909 	drm_dbg_kms(&dev_priv->drm,
3910 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3911 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3912 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3913 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3914 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3915 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3916 		    hw_state->cfgcr0, hw_state->cfgcr1,
3917 		    hw_state->div0,
3918 		    hw_state->mg_refclkin_ctl,
3919 		    hw_state->mg_clktop2_coreclkctl1,
3920 		    hw_state->mg_clktop2_hsclkctl,
3921 		    hw_state->mg_pll_div0,
3922 		    hw_state->mg_pll_div1,
3923 		    hw_state->mg_pll_lf,
3924 		    hw_state->mg_pll_frac_lock,
3925 		    hw_state->mg_pll_ssc,
3926 		    hw_state->mg_pll_bias,
3927 		    hw_state->mg_pll_tdc_coldst_bias);
3928 }
3929 
/* Vtable for ICL+ combo PHY DPLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
3936 
/* Vtable for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
3943 
/* Vtable for ICL Type-C (MG PHY) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3950 
/*
 * Icelake DPLL table: two combo PLLs, the TBT PLL and four Type-C MG PLLs.
 * Array index must equal the DPLL id (checked in intel_shared_dpll_init()).
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

/* Icelake shared DPLL manager. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3970 
/* Jasper Lake / Elkhart Lake DPLL table: combo PLLs only. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* JSL/EHL shared DPLL manager (no Type-C ports, no update_active_dpll). */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3985 
/*
 * Vtable for TGL+ Type-C (Dekel PHY) PLLs; shares the MG enable/disable
 * and frequency paths but uses a Dekel-specific hw state readout.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3992 
/* Tiger Lake DPLL table: two combo PLLs, TBT PLL and six Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

/* Tiger Lake shared DPLL manager. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4014 
/* Rocket Lake DPLL table: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* Rocket Lake shared DPLL manager. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4029 
/* DG1 DPLL table: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

/* DG1 shared DPLL manager. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4045 
/* Alder Lake S DPLL table: four combo PLLs (ids 0, 1 and DG1's 2, 3). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

/* Alder Lake S shared DPLL manager. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4061 
/* Alder Lake P DPLL table: two combo PLLs, TBT PLL and four Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

/* Alder Lake P shared DPLL manager. */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4081 
4082 /**
4083  * intel_shared_dpll_init - Initialize shared DPLLs
4084  * @dev: drm device
4085  *
4086  * Initialize shared DPLLs for @dev.
4087  */
4088 void intel_shared_dpll_init(struct drm_device *dev)
4089 {
4090 	struct drm_i915_private *dev_priv = to_i915(dev);
4091 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4092 	const struct dpll_info *dpll_info;
4093 	int i;
4094 
4095 	if (IS_DG2(dev_priv))
4096 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4097 		dpll_mgr = NULL;
4098 	else if (IS_ALDERLAKE_P(dev_priv))
4099 		dpll_mgr = &adlp_pll_mgr;
4100 	else if (IS_ALDERLAKE_S(dev_priv))
4101 		dpll_mgr = &adls_pll_mgr;
4102 	else if (IS_DG1(dev_priv))
4103 		dpll_mgr = &dg1_pll_mgr;
4104 	else if (IS_ROCKETLAKE(dev_priv))
4105 		dpll_mgr = &rkl_pll_mgr;
4106 	else if (DISPLAY_VER(dev_priv) >= 12)
4107 		dpll_mgr = &tgl_pll_mgr;
4108 	else if (IS_JSL_EHL(dev_priv))
4109 		dpll_mgr = &ehl_pll_mgr;
4110 	else if (DISPLAY_VER(dev_priv) >= 11)
4111 		dpll_mgr = &icl_pll_mgr;
4112 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4113 		dpll_mgr = &bxt_pll_mgr;
4114 	else if (DISPLAY_VER(dev_priv) == 9)
4115 		dpll_mgr = &skl_pll_mgr;
4116 	else if (HAS_DDI(dev_priv))
4117 		dpll_mgr = &hsw_pll_mgr;
4118 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4119 		dpll_mgr = &pch_pll_mgr;
4120 
4121 	if (!dpll_mgr) {
4122 		dev_priv->dpll.num_shared_dpll = 0;
4123 		return;
4124 	}
4125 
4126 	dpll_info = dpll_mgr->dpll_info;
4127 
4128 	for (i = 0; dpll_info[i].name; i++) {
4129 		drm_WARN_ON(dev, i != dpll_info[i].id);
4130 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4131 	}
4132 
4133 	dev_priv->dpll.mgr = dpll_mgr;
4134 	dev_priv->dpll.num_shared_dpll = i;
4135 	mutex_init(&dev_priv->dpll.lock);
4136 
4137 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4138 }
4139 
4140 /**
4141  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4142  * @state: atomic state
4143  * @crtc: CRTC to reserve DPLLs for
4144  * @encoder: encoder
4145  *
4146  * This function reserves all required DPLLs for the given CRTC and encoder
4147  * combination in the current atomic commit @state and the new @crtc atomic
4148  * state.
4149  *
4150  * The new configuration in the atomic commit @state is made effective by
4151  * calling intel_shared_dpll_swap_state().
4152  *
4153  * The reserved DPLLs should be released by calling
4154  * intel_release_shared_dplls().
4155  *
4156  * Returns:
4157  * True if all required DPLLs were successfully reserved.
4158  */
4159 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4160 				struct intel_crtc *crtc,
4161 				struct intel_encoder *encoder)
4162 {
4163 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4164 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4165 
4166 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4167 		return false;
4168 
4169 	return dpll_mgr->get_dplls(state, crtc, encoder);
4170 }
4171 
4172 /**
4173  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4174  * @state: atomic state
4175  * @crtc: crtc from which the DPLLs are to be released
4176  *
4177  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4178  * from the current atomic commit @state and the old @crtc atomic state.
4179  *
4180  * The new configuration in the atomic commit @state is made effective by
4181  * calling intel_shared_dpll_swap_state().
4182  */
4183 void intel_release_shared_dplls(struct intel_atomic_state *state,
4184 				struct intel_crtc *crtc)
4185 {
4186 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4187 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4188 
4189 	/*
4190 	 * FIXME: this function is called for every platform having a
4191 	 * compute_clock hook, even though the platform doesn't yet support
4192 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4193 	 * called on those.
4194 	 */
4195 	if (!dpll_mgr)
4196 		return;
4197 
4198 	dpll_mgr->put_dplls(state, crtc);
4199 }
4200 
4201 /**
4202  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4203  * @state: atomic state
4204  * @crtc: the CRTC for which to update the active DPLL
4205  * @encoder: encoder determining the type of port DPLL
4206  *
4207  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4208  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4209  * DPLL selected will be based on the current mode of the encoder's port.
4210  */
4211 void intel_update_active_dpll(struct intel_atomic_state *state,
4212 			      struct intel_crtc *crtc,
4213 			      struct intel_encoder *encoder)
4214 {
4215 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4216 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4217 
4218 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4219 		return;
4220 
4221 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4222 }
4223 
4224 /**
4225  * intel_dpll_get_freq - calculate the DPLL's output frequency
4226  * @i915: i915 device
4227  * @pll: DPLL for which to calculate the output frequency
4228  * @pll_state: DPLL state from which to calculate the output frequency
4229  *
4230  * Return the output frequency corresponding to @pll's passed in @pll_state.
4231  */
4232 int intel_dpll_get_freq(struct drm_i915_private *i915,
4233 			const struct intel_shared_dpll *pll,
4234 			const struct intel_dpll_hw_state *pll_state)
4235 {
4236 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4237 		return 0;
4238 
4239 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4240 }
4241 
4242 /**
4243  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4244  * @i915: i915 device
4245  * @pll: DPLL for which to calculate the output frequency
4246  * @hw_state: DPLL's hardware state
4247  *
4248  * Read out @pll's hardware state into @hw_state.
4249  */
4250 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4251 			     struct intel_shared_dpll *pll,
4252 			     struct intel_dpll_hw_state *hw_state)
4253 {
4254 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4255 }
4256 
/*
 * Read back one PLL's enable status and register state from hardware and
 * reconstruct which pipes are using it. Called during driver init / resume
 * to sync software state with whatever BIOS/firmware left enabled.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * If firmware left EHL DPLL4 running, take the DC_OFF reference
	 * here so that combo_pll_disable(), which puts pll->wakeref for
	 * this PLL, stays balanced.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Rebuild the pipe mask from the already-read-out CRTC states. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4284 
/* Refresh the DPLL reference clock values via the platform hook, if any. */
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
		i915->dpll.mgr->update_ref_clks(i915);
}
4290 
/* Read out the hardware state of every shared DPLL on the device. */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}
4298 
/*
 * Bring one PLL's hardware state in line with expectations after readout:
 * a PLL that is enabled but driving no pipe gets switched off.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/*
	 * Applied to every enabled PLL — deliberately before the in-use
	 * check, so the workaround runs even for PLLs that stay enabled.
	 */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4317 
/* Sanitize the state of every shared DPLL after hardware readout. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
}
4325 
4326 /**
4327  * intel_dpll_dump_hw_state - write hw_state to dmesg
4328  * @dev_priv: i915 drm device
4329  * @hw_state: hw state to be written to the log
4330  *
4331  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4332  */
4333 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4334 			      const struct intel_dpll_hw_state *hw_state)
4335 {
4336 	if (dev_priv->dpll.mgr) {
4337 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4338 	} else {
4339 		/* fallback for platforms that don't use the shared dpll
4340 		 * infrastructure
4341 		 */
4342 		drm_dbg_kms(&dev_priv->drm,
4343 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4344 			    "fp0: 0x%x, fp1: 0x%x\n",
4345 			    hw_state->dpll,
4346 			    hw_state->dpll_md,
4347 			    hw_state->fp0,
4348 			    hw_state->fp1);
4349 	}
4350 }
4351