1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "intel_de.h"
27 #include "intel_display_types.h"
28 #include "intel_dpio_phy.h"
29 #include "intel_dpll.h"
30 #include "intel_dpll_mgr.h"
31 #include "intel_pch_refclk.h"
32 #include "intel_tc.h"
33 #include "intel_tc_phy_regs.h"
34 
35 /**
36  * DOC: Display PLLs
37  *
38  * Display PLLs used for driving outputs vary by platform. While some have
39  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
40  * from a pool. In the latter scenario, it is possible that multiple pipes
41  * share a PLL if their configurations match.
42  *
43  * This file provides an abstraction over display PLLs. The function
44  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
45  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
47  * for a given CRTC and encoder configuration by calling
48  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
49  * with intel_release_shared_dplls().
50  * Changes to the users are first staged in the atomic state, and then made
51  * effective by calling intel_shared_dpll_swap_state() during the atomic
52  * commit phase.
53  */
54 
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. Optional: not every funcs table in this file provides it
	 * (e.g. the IBX PCH DPLLs leave it unset).
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
89 
/*
 * Platform-specific DPLL management operations: the table of available
 * DPLLs plus the hooks the shared-DPLL framework calls during atomic
 * modesets.
 */
struct intel_dpll_mgr {
	/* table of DPLLs on this platform, terminated by an empty entry */
	const struct dpll_info *dpll_info;

	/* compute the DPLL state needed by @crtc driven through @encoder */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the DPLL(s) for @crtc in the atomic state; 0 on success */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* release the DPLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* select the active DPLL among those reserved — TODO confirm semantics */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh the cached reference clock values (dpll.ref_clks) */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
108 
109 static void
110 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
111 				  struct intel_shared_dpll_state *shared_dpll)
112 {
113 	enum intel_dpll_id i;
114 
115 	/* Copy shared dpll state */
116 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
117 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
118 
119 		shared_dpll[i] = pll->state;
120 	}
121 }
122 
123 static struct intel_shared_dpll_state *
124 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
125 {
126 	struct intel_atomic_state *state = to_intel_atomic_state(s);
127 
128 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
129 
130 	if (!state->dpll_set) {
131 		state->dpll_set = true;
132 
133 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
134 						  state->shared_dpll);
135 	}
136 
137 	return state->shared_dpll;
138 }
139 
140 /**
141  * intel_get_shared_dpll_by_id - get a DPLL given its id
142  * @dev_priv: i915 device instance
143  * @id: pll id
144  *
145  * Returns:
146  * A pointer to the DPLL with @id
147  */
148 struct intel_shared_dpll *
149 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
150 			    enum intel_dpll_id id)
151 {
152 	return &dev_priv->dpll.shared_dplls[id];
153 }
154 
155 /**
156  * intel_get_shared_dpll_id - get the id of a DPLL
157  * @dev_priv: i915 device instance
158  * @pll: the DPLL
159  *
160  * Returns:
161  * The id of @pll
162  */
163 enum intel_dpll_id
164 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
165 			 struct intel_shared_dpll *pll)
166 {
167 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
168 
169 	if (drm_WARN_ON(&dev_priv->drm,
170 			pll_idx < 0 ||
171 			pll_idx >= dev_priv->dpll.num_shared_dpll))
172 		return -1;
173 
174 	return pll_idx;
175 }
176 
177 /* For ILK+ */
178 void assert_shared_dpll(struct drm_i915_private *dev_priv,
179 			struct intel_shared_dpll *pll,
180 			bool state)
181 {
182 	bool cur_state;
183 	struct intel_dpll_hw_state hw_state;
184 
185 	if (drm_WARN(&dev_priv->drm, !pll,
186 		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
187 		return;
188 
189 	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
190 	I915_STATE_WARN(cur_state != state,
191 	     "%s assertion failure (expected %s, current %s)\n",
192 			pll->info->name, str_on_off(state),
193 			str_on_off(cur_state));
194 }
195 
196 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
197 {
198 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
199 }
200 
201 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
202 {
203 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
204 }
205 
206 static i915_reg_t
207 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
208 			   struct intel_shared_dpll *pll)
209 {
210 	if (IS_DG1(i915))
211 		return DG1_DPLL_ENABLE(pll->info->id);
212 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
213 		return MG_PLL_ENABLE(0);
214 
215 	return ICL_DPLL_ENABLE(pll->info->id);
216 }
217 
218 static i915_reg_t
219 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
220 			struct intel_shared_dpll *pll)
221 {
222 	const enum intel_dpll_id id = pll->info->id;
223 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
224 
225 	if (IS_ALDERLAKE_P(i915))
226 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
227 
228 	return MG_PLL_ENABLE(tc_port);
229 }
230 
231 /**
232  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
233  * @crtc_state: CRTC, and its state, which has a shared DPLL
234  *
235  * Enable the shared DPLL used by @crtc.
236  */
237 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
238 {
239 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
240 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
241 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
242 	unsigned int pipe_mask = BIT(crtc->pipe);
243 	unsigned int old_mask;
244 
245 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
246 		return;
247 
248 	mutex_lock(&dev_priv->dpll.lock);
249 	old_mask = pll->active_mask;
250 
251 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
252 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
253 		goto out;
254 
255 	pll->active_mask |= pipe_mask;
256 
257 	drm_dbg_kms(&dev_priv->drm,
258 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
259 		    pll->info->name, pll->active_mask, pll->on,
260 		    crtc->base.base.id, crtc->base.name);
261 
262 	if (old_mask) {
263 		drm_WARN_ON(&dev_priv->drm, !pll->on);
264 		assert_shared_dpll_enabled(dev_priv, pll);
265 		goto out;
266 	}
267 	drm_WARN_ON(&dev_priv->drm, pll->on);
268 
269 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
270 	pll->info->funcs->enable(dev_priv, pll);
271 	pll->on = true;
272 
273 out:
274 	mutex_unlock(&dev_priv->dpll.lock);
275 }
276 
277 /**
278  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
279  * @crtc_state: CRTC, and its state, which has a shared DPLL
280  *
281  * Disable the shared DPLL used by @crtc.
282  */
283 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
284 {
285 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
286 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
287 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
288 	unsigned int pipe_mask = BIT(crtc->pipe);
289 
290 	/* PCH only available on ILK+ */
291 	if (DISPLAY_VER(dev_priv) < 5)
292 		return;
293 
294 	if (pll == NULL)
295 		return;
296 
297 	mutex_lock(&dev_priv->dpll.lock);
298 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
299 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
300 		     crtc->base.base.id, crtc->base.name))
301 		goto out;
302 
303 	drm_dbg_kms(&dev_priv->drm,
304 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
305 		    pll->info->name, pll->active_mask, pll->on,
306 		    crtc->base.base.id, crtc->base.name);
307 
308 	assert_shared_dpll_enabled(dev_priv, pll);
309 	drm_WARN_ON(&dev_priv->drm, !pll->on);
310 
311 	pll->active_mask &= ~pipe_mask;
312 	if (pll->active_mask)
313 		goto out;
314 
315 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
316 	pll->info->funcs->disable(dev_priv, pll);
317 	pll->on = false;
318 
319 out:
320 	mutex_unlock(&dev_priv->dpll.lock);
321 }
322 
/*
 * Find a shared DPLL among @dpll_mask usable with @pll_state: prefer a PLL
 * that already has users with an identical hw state (so it can be shared);
 * failing that, fall back to the first unused PLL seen in mask order.
 * Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* In-use PLL with a bit-identical hw state can be shared. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
371 
372 static void
373 intel_reference_shared_dpll(struct intel_atomic_state *state,
374 			    const struct intel_crtc *crtc,
375 			    const struct intel_shared_dpll *pll,
376 			    const struct intel_dpll_hw_state *pll_state)
377 {
378 	struct drm_i915_private *i915 = to_i915(state->base.dev);
379 	struct intel_shared_dpll_state *shared_dpll;
380 	const enum intel_dpll_id id = pll->info->id;
381 
382 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
383 
384 	if (shared_dpll[id].pipe_mask == 0)
385 		shared_dpll[id].hw_state = *pll_state;
386 
387 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
388 		pipe_name(crtc->pipe));
389 
390 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
391 }
392 
393 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
394 					  const struct intel_crtc *crtc,
395 					  const struct intel_shared_dpll *pll)
396 {
397 	struct intel_shared_dpll_state *shared_dpll;
398 
399 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
400 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
401 }
402 
403 static void intel_put_dpll(struct intel_atomic_state *state,
404 			   struct intel_crtc *crtc)
405 {
406 	const struct intel_crtc_state *old_crtc_state =
407 		intel_atomic_get_old_crtc_state(state, crtc);
408 	struct intel_crtc_state *new_crtc_state =
409 		intel_atomic_get_new_crtc_state(state, crtc);
410 
411 	new_crtc_state->shared_dpll = NULL;
412 
413 	if (!old_crtc_state->shared_dpll)
414 		return;
415 
416 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
417 }
418 
419 /**
420  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
421  * @state: atomic state
422  *
423  * This is the dpll version of drm_atomic_helper_swap_state() since the
424  * helper does not handle driver-specific global state.
425  *
426  * For consistency with atomic helpers this function does a complete swap,
427  * i.e. it also puts the current state into @state, even though there is no
428  * need for that at this moment.
429  */
430 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
431 {
432 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
433 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
434 	enum intel_dpll_id i;
435 
436 	if (!state->dpll_set)
437 		return;
438 
439 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
440 		struct intel_shared_dpll *pll =
441 			&dev_priv->dpll.shared_dplls[i];
442 
443 		swap(pll->state, shared_dpll[i]);
444 	}
445 }
446 
447 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
448 				      struct intel_shared_dpll *pll,
449 				      struct intel_dpll_hw_state *hw_state)
450 {
451 	const enum intel_dpll_id id = pll->info->id;
452 	intel_wakeref_t wakeref;
453 	u32 val;
454 
455 	wakeref = intel_display_power_get_if_enabled(dev_priv,
456 						     POWER_DOMAIN_DISPLAY_CORE);
457 	if (!wakeref)
458 		return false;
459 
460 	val = intel_de_read(dev_priv, PCH_DPLL(id));
461 	hw_state->dpll = val;
462 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
463 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
464 
465 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
466 
467 	return val & DPLL_VCO_ENABLE;
468 }
469 
470 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
471 {
472 	u32 val;
473 	bool enabled;
474 
475 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
476 
477 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
478 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
479 			    DREF_SUPERSPREAD_SOURCE_MASK));
480 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
481 }
482 
/*
 * Program and enable a PCH DPLL from pll->state.hw_state. The write/wait
 * ordering below is deliberate; do not reorder.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Program the FP dividers before enabling the DPLL itself. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
509 
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	/* Flush the write, then give the hw time to settle. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
519 
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	/*
	 * Nothing to do here: crtc_state->dpll_hw_state is presumably
	 * filled in elsewhere before ibx_get_dpll() consumes it — TODO
	 * confirm against the callers of compute_dplls.
	 */
	return 0;
}
526 
527 static int ibx_get_dpll(struct intel_atomic_state *state,
528 			struct intel_crtc *crtc,
529 			struct intel_encoder *encoder)
530 {
531 	struct intel_crtc_state *crtc_state =
532 		intel_atomic_get_new_crtc_state(state, crtc);
533 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
534 	struct intel_shared_dpll *pll;
535 	enum intel_dpll_id i;
536 
537 	if (HAS_PCH_IBX(dev_priv)) {
538 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
539 		i = (enum intel_dpll_id) crtc->pipe;
540 		pll = &dev_priv->dpll.shared_dplls[i];
541 
542 		drm_dbg_kms(&dev_priv->drm,
543 			    "[CRTC:%d:%s] using pre-allocated %s\n",
544 			    crtc->base.base.id, crtc->base.name,
545 			    pll->info->name);
546 	} else {
547 		pll = intel_find_shared_dpll(state, crtc,
548 					     &crtc_state->dpll_hw_state,
549 					     BIT(DPLL_ID_PCH_PLL_B) |
550 					     BIT(DPLL_ID_PCH_PLL_A));
551 	}
552 
553 	if (!pll)
554 		return -EINVAL;
555 
556 	/* reference the pll */
557 	intel_reference_shared_dpll(state, crtc,
558 				    pll, &crtc_state->dpll_hw_state);
559 
560 	crtc_state->shared_dpll = pll;
561 
562 	return 0;
563 }
564 
565 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
566 			      const struct intel_dpll_hw_state *hw_state)
567 {
568 	drm_dbg_kms(&dev_priv->drm,
569 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
570 		    "fp0: 0x%x, fp1: 0x%x\n",
571 		    hw_state->dpll,
572 		    hw_state->dpll_md,
573 		    hw_state->fp0,
574 		    hw_state->fp1);
575 }
576 
/* hw access hooks for the IBX/CPT PCH DPLLs; no get_freq hook provided */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
582 
/* the two PCH DPLLs; the empty entry terminates the table */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
588 
/* DPLL management operations for PCH platforms */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
596 
/* Enable a HSW/BDW WRPLL with the precomputed control value. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* settle time after enabling; presumably PLL lock time — TODO confirm */
	udelay(20);
}
606 
/* Enable the HSW/BDW SPLL with the precomputed control value. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* settle time after enabling; presumably PLL lock time — TODO confirm */
	udelay(20);
}
614 
/* Disable a HSW/BDW WRPLL by clearing its enable bit. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
632 
/* Disable the HSW/BDW SPLL by clearing its enable bit. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
650 
651 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
652 				       struct intel_shared_dpll *pll,
653 				       struct intel_dpll_hw_state *hw_state)
654 {
655 	const enum intel_dpll_id id = pll->info->id;
656 	intel_wakeref_t wakeref;
657 	u32 val;
658 
659 	wakeref = intel_display_power_get_if_enabled(dev_priv,
660 						     POWER_DOMAIN_DISPLAY_CORE);
661 	if (!wakeref)
662 		return false;
663 
664 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
665 	hw_state->wrpll = val;
666 
667 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
668 
669 	return val & WRPLL_PLL_ENABLE;
670 }
671 
672 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
673 				      struct intel_shared_dpll *pll,
674 				      struct intel_dpll_hw_state *hw_state)
675 {
676 	intel_wakeref_t wakeref;
677 	u32 val;
678 
679 	wakeref = intel_display_power_get_if_enabled(dev_priv,
680 						     POWER_DOMAIN_DISPLAY_CORE);
681 	if (!wakeref)
682 		return false;
683 
684 	val = intel_de_read(dev_priv, SPLL_CTL);
685 	hw_state->spll = val;
686 
687 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
688 
689 	return val & SPLL_PLL_ENABLE;
690 }
691 
/* LC PLL reference: 2700 MHz */
#define LC_FREQ 2700
/* LC_FREQ scaled into the units used by the divider search below */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* post-divider (P) search range; even values only */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* candidate divider triplet for the HSW WRPLL; values are 2x the real dividers */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
708 
/*
 * Return the error budget (in ppm) for the WRPLL divider search at the
 * given pixel clock (Hz). Known-good clocks get a zero budget; a handful
 * of troublesome frequencies get relaxed budgets; everything else uses
 * the default of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	switch (clock) {
	case 25175000:
	case 25200000:
	case 27000000:
	case 27027000:
	case 37762500:
	case 37800000:
	case 40500000:
	case 40541000:
	case 54000000:
	case 54054000:
	case 59341000:
	case 59400000:
	case 72000000:
	case 74176000:
	case 74250000:
	case 81000000:
	case 81081000:
	case 89012000:
	case 89100000:
	case 108000000:
	case 108108000:
	case 111264000:
	case 111375000:
	case 148352000:
	case 148500000:
	case 162000000:
	case 162162000:
	case 222525000:
	case 222750000:
	case 296703000:
	case 297000000:
		return 0;
	case 233500000:
	case 245250000:
	case 247750000:
	case 253250000:
	case 298000000:
		return 1500;
	case 169128000:
	case 169500000:
	case 179500000:
	case 202000000:
		return 2000;
	case 256250000:
	case 262500000:
	case 270000000:
	case 272500000:
	case 273750000:
	case 280750000:
	case 281250000:
	case 286000000:
	case 291750000:
		return 4000;
	case 267250000:
	case 268500000:
		return 5000;
	default:
		return 1000;
	}
}
782 
/*
 * Compare a candidate divider triplet (r2, n2, p) for target frequency
 * @freq2k against the current @best and update @best in place if the
 * candidate is preferable under the ppm error @budget.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * All comparisons are scaled by p * r2 to stay in integer math:
	 * "within budget" (delta <= budget) becomes a >= c for the
	 * candidate, and b >= d for the current best.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
842 
/*
 * Exhaustively search the (r2, n2, p) divider space for the combination
 * that best produces @clock, honoring the REF and VCO constraints above.
 * Results come back through @r2_out, @n2_out and @p_out as 2x-scaled
 * divider values.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Try every (even) post divider with this (r2, n2). */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
907 
/*
 * Compute the WRPLL control value for @crtc's new state from its port
 * clock. Always succeeds.
 */
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	/* port_clock is in kHz; the calculator expects Hz */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

	return 0;
}
925 
926 static struct intel_shared_dpll *
927 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
928 		       struct intel_crtc *crtc)
929 {
930 	struct intel_crtc_state *crtc_state =
931 		intel_atomic_get_new_crtc_state(state, crtc);
932 
933 	return intel_find_shared_dpll(state, crtc,
934 				      &crtc_state->dpll_hw_state,
935 				      BIT(DPLL_ID_WRPLL2) |
936 				      BIT(DPLL_ID_WRPLL1));
937 }
938 
/*
 * Calculate the WRPLL output frequency from its hw state, picking the
 * reference clock based on the control register's reference select.
 * Returns 0 (with a MISSING_CASE) for an unknown reference selection.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the control register. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
978 
979 static int
980 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
981 {
982 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
983 	int clock = crtc_state->port_clock;
984 
985 	switch (clock / 2) {
986 	case 81000:
987 	case 135000:
988 	case 270000:
989 		return 0;
990 	default:
991 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
992 			    clock);
993 		return -EINVAL;
994 	}
995 }
996 
997 static struct intel_shared_dpll *
998 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
999 {
1000 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1001 	struct intel_shared_dpll *pll;
1002 	enum intel_dpll_id pll_id;
1003 	int clock = crtc_state->port_clock;
1004 
1005 	switch (clock / 2) {
1006 	case 81000:
1007 		pll_id = DPLL_ID_LCPLL_810;
1008 		break;
1009 	case 135000:
1010 		pll_id = DPLL_ID_LCPLL_1350;
1011 		break;
1012 	case 270000:
1013 		pll_id = DPLL_ID_LCPLL_2700;
1014 		break;
1015 	default:
1016 		MISSING_CASE(clock / 2);
1017 		return NULL;
1018 	}
1019 
1020 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1021 
1022 	if (!pll)
1023 		return NULL;
1024 
1025 	return pll;
1026 }
1027 
1028 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1029 				  const struct intel_shared_dpll *pll,
1030 				  const struct intel_dpll_hw_state *pll_state)
1031 {
1032 	int link_clock = 0;
1033 
1034 	switch (pll->info->id) {
1035 	case DPLL_ID_LCPLL_810:
1036 		link_clock = 81000;
1037 		break;
1038 	case DPLL_ID_LCPLL_1350:
1039 		link_clock = 135000;
1040 		break;
1041 	case DPLL_ID_LCPLL_2700:
1042 		link_clock = 270000;
1043 		break;
1044 	default:
1045 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1046 		break;
1047 	}
1048 
1049 	return link_clock * 2;
1050 }
1051 
1052 static int
1053 hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
1054 			  struct intel_crtc *crtc)
1055 {
1056 	struct intel_crtc_state *crtc_state =
1057 		intel_atomic_get_new_crtc_state(state, crtc);
1058 
1059 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1060 		return -EINVAL;
1061 
1062 	crtc_state->dpll_hw_state.spll =
1063 		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
1064 
1065 	return 0;
1066 }
1067 
1068 static struct intel_shared_dpll *
1069 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1070 		      struct intel_crtc *crtc)
1071 {
1072 	struct intel_crtc_state *crtc_state =
1073 		intel_atomic_get_new_crtc_state(state, crtc);
1074 
1075 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1076 				      BIT(DPLL_ID_SPLL));
1077 }
1078 
1079 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1080 				 const struct intel_shared_dpll *pll,
1081 				 const struct intel_dpll_hw_state *pll_state)
1082 {
1083 	int link_clock = 0;
1084 
1085 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1086 	case SPLL_FREQ_810MHz:
1087 		link_clock = 81000;
1088 		break;
1089 	case SPLL_FREQ_1350MHz:
1090 		link_clock = 135000;
1091 		break;
1092 	case SPLL_FREQ_2700MHz:
1093 		link_clock = 270000;
1094 		break;
1095 	default:
1096 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1097 		break;
1098 	}
1099 
1100 	return link_clock * 2;
1101 }
1102 
1103 static int hsw_compute_dpll(struct intel_atomic_state *state,
1104 			    struct intel_crtc *crtc,
1105 			    struct intel_encoder *encoder)
1106 {
1107 	struct intel_crtc_state *crtc_state =
1108 		intel_atomic_get_new_crtc_state(state, crtc);
1109 
1110 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1111 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1112 	else if (intel_crtc_has_dp_encoder(crtc_state))
1113 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1114 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1115 		return hsw_ddi_spll_compute_dpll(state, crtc);
1116 	else
1117 		return -EINVAL;
1118 }
1119 
1120 static int hsw_get_dpll(struct intel_atomic_state *state,
1121 			struct intel_crtc *crtc,
1122 			struct intel_encoder *encoder)
1123 {
1124 	struct intel_crtc_state *crtc_state =
1125 		intel_atomic_get_new_crtc_state(state, crtc);
1126 	struct intel_shared_dpll *pll = NULL;
1127 
1128 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1129 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1130 	else if (intel_crtc_has_dp_encoder(crtc_state))
1131 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1132 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1133 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1134 
1135 	if (!pll)
1136 		return -EINVAL;
1137 
1138 	intel_reference_shared_dpll(state, crtc,
1139 				    pll, &crtc_state->dpll_hw_state);
1140 
1141 	crtc_state->shared_dpll = pll;
1142 
1143 	return 0;
1144 }
1145 
1146 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1147 {
1148 	i915->dpll.ref_clks.ssc = 135000;
1149 	/* Non-SSC is only used on non-ULT HSW. */
1150 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1151 		i915->dpll.ref_clks.nssc = 24000;
1152 	else
1153 		i915->dpll.ref_clks.nssc = 135000;
1154 }
1155 
/* Dump the HSW-relevant dpll_hw_state members to the kms debug log. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1162 
/* Hooks for the two HSW WRPLLs (used for HDMI, see hsw_compute_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1169 
/* Hooks for the HSW SPLL (used for analog output, see hsw_compute_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1176 
/*
 * The HSW LCPLLs are registered with INTEL_DPLL_ALWAYS_ON (see
 * hsw_plls[]), so there is nothing to do to enable one.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1181 
/* The always-on HSW LCPLLs are never disabled, hence a no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1186 
/*
 * The always-on LCPLLs have no per-PLL configuration to read back;
 * report them as enabled without touching @hw_state.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1193 
/* Hooks for the fixed-frequency, always-on HSW LCPLLs (used for DP). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1200 
/* All shared DPLLs available on HSW; the LCPLLs are always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1210 
/* Shared DPLL manager wiring up the HSW PLLs and hooks above. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1219 
/* Per-PLL control and configuration registers for the SKL DPLLs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1223 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
		/* .cfgcr1/.cfgcr2 intentionally left zero-initialized */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1250 
/*
 * Program this PLL's link-rate/SSC/HDMI-mode field in the shared
 * DPLL_CTRL1 register.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* Each PLL owns a 6-bit field at bit position id * 6. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1267 
/*
 * Enable a SKL DPLL: program ctrl1 and the cfgcr registers, set the
 * enable bit, then wait (up to 5 ms) for the PLL to report lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1288 
/*
 * DPLL0 is already running (it drives CDCLK, see
 * skl_ddi_dpll0_get_hw_state()) and has no CFGCR registers, so only
 * its DPLL_CTRL1 field needs programming.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1294 
/* Disable a SKL DPLL by clearing its enable bit; other state is kept. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1306 
/*
 * DPLL0 drives CDCLK (see skl_ddi_dpll0_get_hw_state()) and must stay
 * enabled, hence a no-op.
 */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1311 
/*
 * Read back a SKL DPLL's configuration from the hardware.
 *
 * Returns false if display power is down or the PLL is disabled;
 * otherwise fills @hw_state (ctrl1 always, cfgcr1/2 only in HDMI mode)
 * and returns true.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's 6-bit field from the shared DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1348 
/*
 * Read back DPLL0's state. Unlike the other SKL DPLLs it is expected
 * to always be enabled, and it has no cfgcr registers to read.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* extract this PLL's 6-bit field from the shared DPLL_CTRL1 */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1381 
/* Best divider found so far during the WRPLL parameter search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6%  of the DCO central freq */
/* Units of 0.01%: 100 => 1%, 600 => 6% (see skl_wrpll_try_divider()) */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1392 
1393 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1394 				  u64 central_freq,
1395 				  u64 dco_freq,
1396 				  unsigned int divider)
1397 {
1398 	u64 deviation;
1399 
1400 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1401 			      central_freq);
1402 
1403 	/* positive deviation */
1404 	if (dco_freq >= central_freq) {
1405 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1406 		    deviation < ctx->min_deviation) {
1407 			ctx->min_deviation = deviation;
1408 			ctx->central_freq = central_freq;
1409 			ctx->dco_freq = dco_freq;
1410 			ctx->p = divider;
1411 		}
1412 	/* negative deviation */
1413 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1414 		   deviation < ctx->min_deviation) {
1415 		ctx->min_deviation = deviation;
1416 		ctx->central_freq = central_freq;
1417 		ctx->dco_freq = dco_freq;
1418 		ctx->p = divider;
1419 	}
1420 }
1421 
/*
 * Decompose the overall P divider into the hardware's P0 (pdiv),
 * P1 (qdiv) and P2 (kdiv) factors such that p == p0 * p1 * p2.
 * Dividers outside the supported set leave the outputs untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* Even divider: factor out one 2, then split the half. */
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* Odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1470 
/*
 * Decoded WRPLL settings, in the field encodings written to
 * DPLL_CFGCR1/CFGCR2 (see skl_ddi_hdmi_pll_dividers()).
 */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1480 
/*
 * Translate the chosen dividers and DCO central frequency into the
 * register field encodings of struct skl_wrpll_params.
 *
 * @afe_clock and @central_freq are in Hz; @ref_clock is in kHz (it is
 * multiplied by KHz(1) below to get Hz).
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/*
	 * NOTE(review): no default case; central_freq is expected to be
	 * one of the three DCO central frequencies searched by
	 * skl_ddi_calculate_wrpll().
	 */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
		/* last case, falls out of the switch */
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* qdiv_mode 1 means the qdiv ratio is actually applied */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1548 
/*
 * Search all supported P dividers and DCO central frequencies for the
 * combination with the smallest DCO deviation, preferring even
 * dividers over odd ones, and populate @wrpll_params with the result.
 *
 * @clock is the pixel clock in Hz. Returns 0 on success, -EINVAL when
 * no divider yields an acceptable deviation.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		/* even dividers first: they are preferred (see below) */
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	/* ctx.p is only set once a divider passed the deviation limits */
	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}
1620 
/*
 * Compute the WRPLL dividers for an HDMI port clock and store the
 * resulting ctrl1/cfgcr1/cfgcr2 values in the crtc state.
 *
 * Returns 0 on success, or the error from skl_ddi_calculate_wrpll().
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz; the calculation works in Hz */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	return 0;
}
1657 
/*
 * Reverse of skl_ddi_hdmi_pll_dividers(): decode the cfgcr1/cfgcr2
 * divider fields back into the port clock in kHz.
 *
 * Returns 0 for bogus register contents.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* the qdiv ratio only applies when qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* integer part of the DCO frequency */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	/* fractional part, encoded in units of 1/0x8000 of the ref clock */
	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is 5x the pixel clock; total P divider is p0*p1*p2 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1728 
1729 static int
1730 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1731 {
1732 	u32 ctrl1;
1733 
1734 	/*
1735 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1736 	 * as the DPLL id in this function.
1737 	 */
1738 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1739 	switch (crtc_state->port_clock / 2) {
1740 	case 81000:
1741 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1742 		break;
1743 	case 135000:
1744 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1745 		break;
1746 	case 270000:
1747 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1748 		break;
1749 		/* eDP 1.4 rates */
1750 	case 162000:
1751 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1752 		break;
1753 	case 108000:
1754 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1755 		break;
1756 	case 216000:
1757 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1758 		break;
1759 	}
1760 
1761 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1762 
1763 	return 0;
1764 }
1765 
1766 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1767 				  const struct intel_shared_dpll *pll,
1768 				  const struct intel_dpll_hw_state *pll_state)
1769 {
1770 	int link_clock = 0;
1771 
1772 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1773 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1774 	case DPLL_CTRL1_LINK_RATE_810:
1775 		link_clock = 81000;
1776 		break;
1777 	case DPLL_CTRL1_LINK_RATE_1080:
1778 		link_clock = 108000;
1779 		break;
1780 	case DPLL_CTRL1_LINK_RATE_1350:
1781 		link_clock = 135000;
1782 		break;
1783 	case DPLL_CTRL1_LINK_RATE_1620:
1784 		link_clock = 162000;
1785 		break;
1786 	case DPLL_CTRL1_LINK_RATE_2160:
1787 		link_clock = 216000;
1788 		break;
1789 	case DPLL_CTRL1_LINK_RATE_2700:
1790 		link_clock = 270000;
1791 		break;
1792 	default:
1793 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1794 		break;
1795 	}
1796 
1797 	return link_clock * 2;
1798 }
1799 
1800 static int skl_compute_dpll(struct intel_atomic_state *state,
1801 			    struct intel_crtc *crtc,
1802 			    struct intel_encoder *encoder)
1803 {
1804 	struct intel_crtc_state *crtc_state =
1805 		intel_atomic_get_new_crtc_state(state, crtc);
1806 
1807 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1808 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1809 	else if (intel_crtc_has_dp_encoder(crtc_state))
1810 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1811 	else
1812 		return -EINVAL;
1813 }
1814 
1815 static int skl_get_dpll(struct intel_atomic_state *state,
1816 			struct intel_crtc *crtc,
1817 			struct intel_encoder *encoder)
1818 {
1819 	struct intel_crtc_state *crtc_state =
1820 		intel_atomic_get_new_crtc_state(state, crtc);
1821 	struct intel_shared_dpll *pll;
1822 
1823 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1824 		pll = intel_find_shared_dpll(state, crtc,
1825 					     &crtc_state->dpll_hw_state,
1826 					     BIT(DPLL_ID_SKL_DPLL0));
1827 	else
1828 		pll = intel_find_shared_dpll(state, crtc,
1829 					     &crtc_state->dpll_hw_state,
1830 					     BIT(DPLL_ID_SKL_DPLL3) |
1831 					     BIT(DPLL_ID_SKL_DPLL2) |
1832 					     BIT(DPLL_ID_SKL_DPLL1));
1833 	if (!pll)
1834 		return -EINVAL;
1835 
1836 	intel_reference_shared_dpll(state, crtc,
1837 				    pll, &crtc_state->dpll_hw_state);
1838 
1839 	crtc_state->shared_dpll = pll;
1840 
1841 	return 0;
1842 }
1843 
1844 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1845 				const struct intel_shared_dpll *pll,
1846 				const struct intel_dpll_hw_state *pll_state)
1847 {
1848 	/*
1849 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1850 	 * the internal shift for each field
1851 	 */
1852 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1853 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1854 	else
1855 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1856 }
1857 
/* SKL PLLs have no SSC reference; the non-SSC ref follows the CDCLK ref. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1863 
/* Dump the SKL-relevant dpll_hw_state members to the kms debug log. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1873 
/* Hooks for SKL DPLL1-3 (regular, on-demand PLLs). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1880 
/* Hooks for SKL DPLL0, which stays enabled (it drives CDCLK). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1887 
/* All SKL shared DPLLs; DPLL0 is always on since it drives CDCLK. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1895 
/* Shared DPLL manager wiring up the SKL PLLs and hooks above. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1904 
/*
 * Full BXT/GLK port PLL programming sequence: select the non-SSC
 * reference, (GLK only) power up the PLL, program dividers and loop
 * coefficients from the cached hw state, trigger recalibration,
 * enable the PLL and wait for lock, then set up lane staggering.
 * The register write order follows the hardware enable sequence and
 * must not be changed.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_INT_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* second EBB_4 write applies the cached ebb4 bits */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
2025 
/*
 * Disable a BXT/GLK port PLL: clear the enable bit and, on GLK, also
 * power the PLL down and wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2048 
/*
 * Read back the current HW state of port PLL @pll into @hw_state.
 *
 * Each captured register value is masked down to just the bits this
 * driver programs, so the result can be compared against a computed
 * software state.
 *
 * Returns true (and fills @hw_state) when the PLL is enabled, false when
 * it is disabled or the display power well could not be acquired.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Bail out without touching registers if display power is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2128 
/*
 * Pre-calculated PLL divider values for the standard DP link rates.
 * .dot is matched against crtc_state->port_clock by
 * bxt_ddi_dp_pll_dividers().
 */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2140 
2141 static int
2142 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2143 			  struct dpll *clk_div)
2144 {
2145 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2146 
2147 	/* Calculate HDMI div */
2148 	/*
2149 	 * FIXME: tie the following calculation into
2150 	 * i9xx_crtc_compute_clock
2151 	 */
2152 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2153 		return -EINVAL;
2154 
2155 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2156 
2157 	return 0;
2158 }
2159 
2160 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2161 				    struct dpll *clk_div)
2162 {
2163 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2164 	int i;
2165 
2166 	*clk_div = bxt_dp_clk_val[0];
2167 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2168 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2169 			*clk_div = bxt_dp_clk_val[i];
2170 			break;
2171 		}
2172 	}
2173 
2174 	chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div);
2175 
2176 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2177 		    clk_div->dot != crtc_state->port_clock);
2178 }
2179 
/*
 * Translate the clock dividers in @clk_div into the BXT port PLL
 * register values stored in crtc_state->dpll_hw_state.
 *
 * Returns 0 on success, -EINVAL when the VCO frequency falls outside
 * the ranges covered by the loop-filter coefficient table.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/*
	 * Loop-filter coefficients per VCO range. Note vco == 5400000 is
	 * deliberately excluded from the middle range and gets its own
	 * coefficient set below.
	 */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane stagger value scales with the link clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* m2 is .22 fixed point: integer part in pll0, fraction in pll2. */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2247 
/*
 * Compute and store the BXT PLL HW state for a DP output.
 *
 * The DP dividers come from the fixed bxt_dp_clk_val[] table, so the
 * lookup itself cannot fail; only bxt_ddi_set_dpll_hw_state() can
 * report an error.
 */
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct dpll clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2257 
2258 static int
2259 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2260 {
2261 	struct dpll clk_div = {};
2262 
2263 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2264 
2265 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2266 }
2267 
2268 static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
2269 				const struct intel_shared_dpll *pll,
2270 				const struct intel_dpll_hw_state *pll_state)
2271 {
2272 	struct dpll clock;
2273 
2274 	clock.m1 = 2;
2275 	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
2276 	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
2277 		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
2278 	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
2279 	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
2280 	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
2281 
2282 	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
2283 }
2284 
2285 static int bxt_compute_dpll(struct intel_atomic_state *state,
2286 			    struct intel_crtc *crtc,
2287 			    struct intel_encoder *encoder)
2288 {
2289 	struct intel_crtc_state *crtc_state =
2290 		intel_atomic_get_new_crtc_state(state, crtc);
2291 
2292 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2293 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2294 	else if (intel_crtc_has_dp_encoder(crtc_state))
2295 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2296 	else
2297 		return -EINVAL;
2298 }
2299 
2300 static int bxt_get_dpll(struct intel_atomic_state *state,
2301 			struct intel_crtc *crtc,
2302 			struct intel_encoder *encoder)
2303 {
2304 	struct intel_crtc_state *crtc_state =
2305 		intel_atomic_get_new_crtc_state(state, crtc);
2306 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2307 	struct intel_shared_dpll *pll;
2308 	enum intel_dpll_id id;
2309 
2310 	/* 1:1 mapping between ports and PLLs */
2311 	id = (enum intel_dpll_id) encoder->port;
2312 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2313 
2314 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
2315 		    crtc->base.base.id, crtc->base.name, pll->info->name);
2316 
2317 	intel_reference_shared_dpll(state, crtc,
2318 				    pll, &crtc_state->dpll_hw_state);
2319 
2320 	crtc_state->shared_dpll = pll;
2321 
2322 	return 0;
2323 }
2324 
/* BXT/GLK use a 100 MHz reference for both the SSC and non-SSC cases. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2331 
2332 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2333 			      const struct intel_dpll_hw_state *hw_state)
2334 {
2335 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2336 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2337 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2338 		    hw_state->ebb0,
2339 		    hw_state->ebb4,
2340 		    hw_state->pll0,
2341 		    hw_state->pll1,
2342 		    hw_state->pll2,
2343 		    hw_state->pll3,
2344 		    hw_state->pll6,
2345 		    hw_state->pll8,
2346 		    hw_state->pll9,
2347 		    hw_state->pll10,
2348 		    hw_state->pcsdw12);
2349 }
2350 
/* PLL hooks shared by all three BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2357 
/*
 * One PLL per port A/B/C; the ids reuse the SKL enum values. The list
 * is terminated by an empty sentinel entry.
 */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2364 
/* Top-level PLL manager hooks for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2373 
/*
 * Decompose a total post divider @bestdiv into the pdiv/qdiv/kdiv
 * factors the WRPLL hardware takes (bestdiv == pdiv * qdiv * kdiv).
 * @bestdiv is expected to come from the divider table in
 * icl_calc_wrpll(); values outside it may leave the outputs untouched.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2) {
		/* odd dividers: 3/5/7 map directly, 9/15/21 use kdiv == 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*kdiv = 3;
		}
		*qdiv = 1;
		return;
	}

	/* even dividers; order of the checks matters (e.g. 20 -> pdiv 2) */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2412 
2413 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2414 				      u32 dco_freq, u32 ref_freq,
2415 				      int pdiv, int qdiv, int kdiv)
2416 {
2417 	u32 dco;
2418 
2419 	switch (kdiv) {
2420 	case 1:
2421 		params->kdiv = 1;
2422 		break;
2423 	case 2:
2424 		params->kdiv = 2;
2425 		break;
2426 	case 3:
2427 		params->kdiv = 4;
2428 		break;
2429 	default:
2430 		WARN(1, "Incorrect KDiv\n");
2431 	}
2432 
2433 	switch (pdiv) {
2434 	case 2:
2435 		params->pdiv = 1;
2436 		break;
2437 	case 3:
2438 		params->pdiv = 2;
2439 		break;
2440 	case 5:
2441 		params->pdiv = 4;
2442 		break;
2443 	case 7:
2444 		params->pdiv = 8;
2445 		break;
2446 	default:
2447 		WARN(1, "Incorrect PDiv\n");
2448 	}
2449 
2450 	WARN_ON(kdiv != 2 && qdiv != 1);
2451 
2452 	params->qdiv_ratio = qdiv;
2453 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2454 
2455 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2456 
2457 	params->dco_integer = dco >> 15;
2458 	params->dco_fraction = dco & 0x7fff;
2459 }
2460 
2461 /*
2462  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2463  * Program half of the nominal DCO divider fraction value.
2464  */
2465 static bool
2466 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2467 {
2468 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2469 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2470 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2471 		 i915->dpll.ref_clks.nssc == 38400;
2472 }
2473 
/*
 * Pairs a link clock (matched against crtc_state->port_clock) with its
 * pre-computed WRPLL parameters.
 */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2478 
2479 /*
2480  * These values alrea already adjusted: they're the bits we write to the
2481  * registers, not the logical values.
2482  */
/* DP combo PHY PLL settings for a 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2509 
2510 
/*
 * DP combo PHY PLL settings for a 19.2 MHz reference clock.
 * Also used for 38.4 MHz values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2538 
/* TBT PLL parameters, selected by reference clock in icl_calc_tbt_pll(). */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL+ variants: only the DCO fields matter. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused (and zero-initialized) */
};
2559 
2560 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2561 				 struct skl_wrpll_params *pll_params)
2562 {
2563 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2564 	const struct icl_combo_pll_params *params =
2565 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2566 		icl_dp_combo_pll_24MHz_values :
2567 		icl_dp_combo_pll_19_2MHz_values;
2568 	int clock = crtc_state->port_clock;
2569 	int i;
2570 
2571 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2572 		if (clock == params[i].clock) {
2573 			*pll_params = params[i].wrpll;
2574 			return 0;
2575 		}
2576 	}
2577 
2578 	MISSING_CASE(clock);
2579 	return -EINVAL;
2580 }
2581 
2582 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2583 			    struct skl_wrpll_params *pll_params)
2584 {
2585 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2586 
2587 	if (DISPLAY_VER(dev_priv) >= 12) {
2588 		switch (dev_priv->dpll.ref_clks.nssc) {
2589 		default:
2590 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2591 			fallthrough;
2592 		case 19200:
2593 		case 38400:
2594 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2595 			break;
2596 		case 24000:
2597 			*pll_params = tgl_tbt_pll_24MHz_values;
2598 			break;
2599 		}
2600 	} else {
2601 		switch (dev_priv->dpll.ref_clks.nssc) {
2602 		default:
2603 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2604 			fallthrough;
2605 		case 19200:
2606 		case 38400:
2607 			*pll_params = icl_tbt_pll_19_2MHz_values;
2608 			break;
2609 		case 24000:
2610 			*pll_params = icl_tbt_pll_24MHz_values;
2611 			break;
2612 		}
2613 	}
2614 
2615 	return 0;
2616 }
2617 
/*
 * TBT PLL frequency readout is not meaningful: the PLL outputs multiple
 * frequencies at the same time, selection is made at DDI clock mux
 * level. Calling this is therefore a driver bug; warn and return 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2630 
2631 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2632 {
2633 	int ref_clock = i915->dpll.ref_clks.nssc;
2634 
2635 	/*
2636 	 * For ICL+, the spec states: if reference frequency is 38.4,
2637 	 * use 19.2 because the DPLL automatically divides that by 2.
2638 	 */
2639 	if (ref_clock == 38400)
2640 		ref_clock = 19200;
2641 
2642 	return ref_clock;
2643 }
2644 
2645 static int
2646 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2647 	       struct skl_wrpll_params *wrpll_params)
2648 {
2649 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2650 	int ref_clock = icl_wrpll_ref_clock(i915);
2651 	u32 afe_clock = crtc_state->port_clock * 5;
2652 	u32 dco_min = 7998000;
2653 	u32 dco_max = 10000000;
2654 	u32 dco_mid = (dco_min + dco_max) / 2;
2655 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2656 					 18, 20, 24, 28, 30, 32,  36,  40,
2657 					 42, 44, 48, 50, 52, 54,  56,  60,
2658 					 64, 66, 68, 70, 72, 76,  78,  80,
2659 					 84, 88, 90, 92, 96, 98, 100, 102,
2660 					  3,  5,  7,  9, 15, 21 };
2661 	u32 dco, best_dco = 0, dco_centrality = 0;
2662 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2663 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2664 
2665 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2666 		dco = afe_clock * dividers[d];
2667 
2668 		if (dco <= dco_max && dco >= dco_min) {
2669 			dco_centrality = abs(dco - dco_mid);
2670 
2671 			if (dco_centrality < best_dco_centrality) {
2672 				best_dco_centrality = dco_centrality;
2673 				best_div = dividers[d];
2674 				best_dco = dco;
2675 			}
2676 		}
2677 	}
2678 
2679 	if (best_div == 0)
2680 		return -EINVAL;
2681 
2682 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2683 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2684 				  pdiv, qdiv, kdiv);
2685 
2686 	return 0;
2687 }
2688 
/*
 * Decode the combo PHY PLL cfgcr0/cfgcr1 fields and compute the
 * resulting clock. The final division by 5 undoes the
 * afe_clock = port_clock * 5 scaling used at calculation time.
 *
 * Returns 0 (with a WARN) if any decoded divider is zero.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* The qdiv ratio only applies when qdiv mode is enabled. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Translate the pdiv register encoding back to its divide ratio. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* Likewise for kdiv. */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo WA #22010492432: the register holds half the fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* dco_fraction is a .15 fixed-point value, hence the / 0x8000. */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2749 
2750 static void icl_calc_dpll_state(struct drm_i915_private *i915,
2751 				const struct skl_wrpll_params *pll_params,
2752 				struct intel_dpll_hw_state *pll_state)
2753 {
2754 	u32 dco_fraction = pll_params->dco_fraction;
2755 
2756 	if (ehl_combo_pll_div_frac_wa_needed(i915))
2757 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
2758 
2759 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
2760 			    pll_params->dco_integer;
2761 
2762 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
2763 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
2764 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
2765 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
2766 
2767 	if (DISPLAY_VER(i915) >= 12)
2768 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2769 	else
2770 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2771 
2772 	if (i915->vbt.override_afc_startup)
2773 		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
2774 }
2775 
/*
 * Search for a div1/div2 combination whose DCO frequency falls inside
 * the valid range for the MG/DKL PHY PLL, preferring larger div1 values
 * (7, 5, 3, 2) and larger div2 values (10 down to 1).
 *
 * On success fills *target_dco_khz and the refclkin/clktop2 fields of
 * @state and returns 0; returns -EINVAL when no combination fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP pins the DCO to exactly 8.1 GHz; HDMI allows a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Map div1 to its register field encoding. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2851 
2852 /*
2853  * The specification for this function uses real numbers, so the math had to be
2854  * adapted to integer-only calculation, that's why it looks so different.
2855  */
2856 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2857 				 struct intel_dpll_hw_state *pll_state)
2858 {
2859 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2860 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
2861 	int clock = crtc_state->port_clock;
2862 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2863 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2864 	u32 prop_coeff, int_coeff;
2865 	u32 tdc_targetcnt, feedfwgain;
2866 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2867 	u64 tmp;
2868 	bool use_ssc = false;
2869 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2870 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2871 	int ret;
2872 
2873 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2874 				       pll_state, is_dkl);
2875 	if (ret)
2876 		return ret;
2877 
2878 	m1div = 2;
2879 	m2div_int = dco_khz / (refclk_khz * m1div);
2880 	if (m2div_int > 255) {
2881 		if (!is_dkl) {
2882 			m1div = 4;
2883 			m2div_int = dco_khz / (refclk_khz * m1div);
2884 		}
2885 
2886 		if (m2div_int > 255)
2887 			return -EINVAL;
2888 	}
2889 	m2div_rem = dco_khz % (refclk_khz * m1div);
2890 
2891 	tmp = (u64)m2div_rem * (1 << 22);
2892 	do_div(tmp, refclk_khz * m1div);
2893 	m2div_frac = tmp;
2894 
2895 	switch (refclk_khz) {
2896 	case 19200:
2897 		iref_ndiv = 1;
2898 		iref_trim = 28;
2899 		iref_pulse_w = 1;
2900 		break;
2901 	case 24000:
2902 		iref_ndiv = 1;
2903 		iref_trim = 25;
2904 		iref_pulse_w = 2;
2905 		break;
2906 	case 38400:
2907 		iref_ndiv = 2;
2908 		iref_trim = 28;
2909 		iref_pulse_w = 1;
2910 		break;
2911 	default:
2912 		MISSING_CASE(refclk_khz);
2913 		return -EINVAL;
2914 	}
2915 
2916 	/*
2917 	 * tdc_res = 0.000003
2918 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2919 	 *
2920 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2921 	 * was supposed to be a division, but we rearranged the operations of
2922 	 * the formula to avoid early divisions so we don't multiply the
2923 	 * rounding errors.
2924 	 *
2925 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2926 	 * we also rearrange to work with integers.
2927 	 *
2928 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2929 	 * last division by 10.
2930 	 */
2931 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2932 
2933 	/*
2934 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2935 	 * 32 bits. That's not a problem since we round the division down
2936 	 * anyway.
2937 	 */
2938 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2939 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2940 
2941 	if (dco_khz >= 9000000) {
2942 		prop_coeff = 5;
2943 		int_coeff = 10;
2944 	} else {
2945 		prop_coeff = 4;
2946 		int_coeff = 8;
2947 	}
2948 
2949 	if (use_ssc) {
2950 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2951 		do_div(tmp, refclk_khz * m1div * 10000);
2952 		ssc_stepsize = tmp;
2953 
2954 		tmp = mul_u32_u32(dco_khz, 1000);
2955 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2956 	} else {
2957 		ssc_stepsize = 0;
2958 		ssc_steplen = 0;
2959 	}
2960 	ssc_steplog = 4;
2961 
2962 	/* write pll_state calculations */
2963 	if (is_dkl) {
2964 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2965 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2966 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2967 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2968 		if (dev_priv->vbt.override_afc_startup) {
2969 			u8 val = dev_priv->vbt.override_afc_startup_val;
2970 
2971 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2972 		}
2973 
2974 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2975 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2976 
2977 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2978 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2979 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2980 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2981 
2982 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2983 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2984 
2985 		pll_state->mg_pll_tdc_coldst_bias =
2986 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2987 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2988 
2989 	} else {
2990 		pll_state->mg_pll_div0 =
2991 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2992 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2993 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2994 
2995 		pll_state->mg_pll_div1 =
2996 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2997 			MG_PLL_DIV1_DITHER_DIV_2 |
2998 			MG_PLL_DIV1_NDIVRATIO(1) |
2999 			MG_PLL_DIV1_FBPREDIV(m1div);
3000 
3001 		pll_state->mg_pll_lf =
3002 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3003 			MG_PLL_LF_AFCCNTSEL_512 |
3004 			MG_PLL_LF_GAINCTRL(1) |
3005 			MG_PLL_LF_INT_COEFF(int_coeff) |
3006 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3007 
3008 		pll_state->mg_pll_frac_lock =
3009 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3010 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3011 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3012 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3013 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3014 		if (use_ssc || m2div_rem > 0)
3015 			pll_state->mg_pll_frac_lock |=
3016 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3017 
3018 		pll_state->mg_pll_ssc =
3019 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3020 			MG_PLL_SSC_TYPE(2) |
3021 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3022 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3023 			MG_PLL_SSC_FLLEN |
3024 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3025 
3026 		pll_state->mg_pll_tdc_coldst_bias =
3027 			MG_PLL_TDC_COLDST_COLDSTART |
3028 			MG_PLL_TDC_COLDST_IREFINT_EN |
3029 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3030 			MG_PLL_TDC_TDCOVCCORR_EN |
3031 			MG_PLL_TDC_TDCSEL(3);
3032 
3033 		pll_state->mg_pll_bias =
3034 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3035 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3036 			MG_PLL_BIAS_BIAS_BONUS(10) |
3037 			MG_PLL_BIAS_BIASCAL_EN |
3038 			MG_PLL_BIAS_CTRIM(12) |
3039 			MG_PLL_BIAS_VREF_RDAC(4) |
3040 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3041 
3042 		if (refclk_khz == 38400) {
3043 			pll_state->mg_pll_tdc_coldst_bias_mask =
3044 				MG_PLL_TDC_COLDST_COLDSTART;
3045 			pll_state->mg_pll_bias_mask = 0;
3046 		} else {
3047 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3048 			pll_state->mg_pll_bias_mask = -1U;
3049 		}
3050 
3051 		pll_state->mg_pll_tdc_coldst_bias &=
3052 			pll_state->mg_pll_tdc_coldst_bias_mask;
3053 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3054 	}
3055 
3056 	return 0;
3057 }
3058 
3059 static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3060 				   const struct intel_shared_dpll *pll,
3061 				   const struct intel_dpll_hw_state *pll_state)
3062 {
3063 	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
3064 	u64 tmp;
3065 
3066 	ref_clock = dev_priv->dpll.ref_clks.nssc;
3067 
3068 	if (DISPLAY_VER(dev_priv) >= 12) {
3069 		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
3070 		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
3071 		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
3072 
3073 		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
3074 			m2_frac = pll_state->mg_pll_bias &
3075 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
3076 			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
3077 		} else {
3078 			m2_frac = 0;
3079 		}
3080 	} else {
3081 		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
3082 		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
3083 
3084 		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
3085 			m2_frac = pll_state->mg_pll_div0 &
3086 				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
3087 			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
3088 		} else {
3089 			m2_frac = 0;
3090 		}
3091 	}
3092 
3093 	switch (pll_state->mg_clktop2_hsclkctl &
3094 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
3095 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
3096 		div1 = 2;
3097 		break;
3098 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
3099 		div1 = 3;
3100 		break;
3101 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
3102 		div1 = 5;
3103 		break;
3104 	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
3105 		div1 = 7;
3106 		break;
3107 	default:
3108 		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
3109 		return 0;
3110 	}
3111 
3112 	div2 = (pll_state->mg_clktop2_hsclkctl &
3113 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
3114 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
3115 
3116 	/* div2 value of 0 is same as 1 means no div */
3117 	if (div2 == 0)
3118 		div2 = 1;
3119 
3120 	/*
3121 	 * Adjust the original formula to delay the division by 2^22 in order to
3122 	 * minimize possible rounding errors.
3123 	 */
3124 	tmp = (u64)m1 * m2_int * ref_clock +
3125 	      (((u64)m1 * m2_frac * ref_clock) >> 22);
3126 	tmp = div_u64(tmp, 5 * div1 * div2);
3127 
3128 	return tmp;
3129 }
3130 
3131 /**
3132  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3133  * @crtc_state: state for the CRTC to select the DPLL for
3134  * @port_dpll_id: the active @port_dpll_id to select
3135  *
3136  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3137  * CRTC.
3138  */
3139 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3140 			      enum icl_port_dpll_id port_dpll_id)
3141 {
3142 	struct icl_port_dpll *port_dpll =
3143 		&crtc_state->icl_port_dplls[port_dpll_id];
3144 
3145 	crtc_state->shared_dpll = port_dpll->pll;
3146 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3147 }
3148 
3149 static void icl_update_active_dpll(struct intel_atomic_state *state,
3150 				   struct intel_crtc *crtc,
3151 				   struct intel_encoder *encoder)
3152 {
3153 	struct intel_crtc_state *crtc_state =
3154 		intel_atomic_get_new_crtc_state(state, crtc);
3155 	struct intel_digital_port *primary_port;
3156 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3157 
3158 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3159 		enc_to_mst(encoder)->primary :
3160 		enc_to_dig_port(encoder);
3161 
3162 	if (primary_port &&
3163 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3164 	     intel_tc_port_in_legacy_mode(primary_port)))
3165 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3166 
3167 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3168 }
3169 
3170 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3171 {
3172 	if (!(i915->hti_state & HDPORT_ENABLED))
3173 		return 0;
3174 
3175 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3176 }
3177 
3178 static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
3179 				      struct intel_crtc *crtc)
3180 {
3181 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3182 	struct intel_crtc_state *crtc_state =
3183 		intel_atomic_get_new_crtc_state(state, crtc);
3184 	struct icl_port_dpll *port_dpll =
3185 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3186 	struct skl_wrpll_params pll_params = {};
3187 	int ret;
3188 
3189 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3190 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3191 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3192 	else
3193 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3194 
3195 	if (ret)
3196 		return ret;
3197 
3198 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3199 
3200 	return 0;
3201 }
3202 
3203 static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3204 				  struct intel_crtc *crtc,
3205 				  struct intel_encoder *encoder)
3206 {
3207 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3208 	struct intel_crtc_state *crtc_state =
3209 		intel_atomic_get_new_crtc_state(state, crtc);
3210 	struct icl_port_dpll *port_dpll =
3211 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3212 	enum port port = encoder->port;
3213 	unsigned long dpll_mask;
3214 
3215 	if (IS_ALDERLAKE_S(dev_priv)) {
3216 		dpll_mask =
3217 			BIT(DPLL_ID_DG1_DPLL3) |
3218 			BIT(DPLL_ID_DG1_DPLL2) |
3219 			BIT(DPLL_ID_ICL_DPLL1) |
3220 			BIT(DPLL_ID_ICL_DPLL0);
3221 	} else if (IS_DG1(dev_priv)) {
3222 		if (port == PORT_D || port == PORT_E) {
3223 			dpll_mask =
3224 				BIT(DPLL_ID_DG1_DPLL2) |
3225 				BIT(DPLL_ID_DG1_DPLL3);
3226 		} else {
3227 			dpll_mask =
3228 				BIT(DPLL_ID_DG1_DPLL0) |
3229 				BIT(DPLL_ID_DG1_DPLL1);
3230 		}
3231 	} else if (IS_ROCKETLAKE(dev_priv)) {
3232 		dpll_mask =
3233 			BIT(DPLL_ID_EHL_DPLL4) |
3234 			BIT(DPLL_ID_ICL_DPLL1) |
3235 			BIT(DPLL_ID_ICL_DPLL0);
3236 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3237 		dpll_mask =
3238 			BIT(DPLL_ID_EHL_DPLL4) |
3239 			BIT(DPLL_ID_ICL_DPLL1) |
3240 			BIT(DPLL_ID_ICL_DPLL0);
3241 	} else {
3242 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3243 	}
3244 
3245 	/* Eliminate DPLLs from consideration if reserved by HTI */
3246 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3247 
3248 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3249 						&port_dpll->hw_state,
3250 						dpll_mask);
3251 	if (!port_dpll->pll)
3252 		return -EINVAL;
3253 
3254 	intel_reference_shared_dpll(state, crtc,
3255 				    port_dpll->pll, &port_dpll->hw_state);
3256 
3257 	icl_update_active_dpll(state, crtc, encoder);
3258 
3259 	return 0;
3260 }
3261 
3262 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3263 				    struct intel_crtc *crtc)
3264 {
3265 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3266 	struct intel_crtc_state *crtc_state =
3267 		intel_atomic_get_new_crtc_state(state, crtc);
3268 	struct icl_port_dpll *port_dpll =
3269 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3270 	struct skl_wrpll_params pll_params = {};
3271 	int ret;
3272 
3273 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3274 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3275 	if (ret)
3276 		return ret;
3277 
3278 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3279 
3280 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3281 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3282 	if (ret)
3283 		return ret;
3284 
3285 	return 0;
3286 }
3287 
3288 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3289 				struct intel_crtc *crtc,
3290 				struct intel_encoder *encoder)
3291 {
3292 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3293 	struct intel_crtc_state *crtc_state =
3294 		intel_atomic_get_new_crtc_state(state, crtc);
3295 	struct icl_port_dpll *port_dpll =
3296 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3297 	enum intel_dpll_id dpll_id;
3298 	int ret;
3299 
3300 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3301 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3302 						&port_dpll->hw_state,
3303 						BIT(DPLL_ID_ICL_TBTPLL));
3304 	if (!port_dpll->pll)
3305 		return -EINVAL;
3306 	intel_reference_shared_dpll(state, crtc,
3307 				    port_dpll->pll, &port_dpll->hw_state);
3308 
3309 
3310 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3311 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3312 							 encoder->port));
3313 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3314 						&port_dpll->hw_state,
3315 						BIT(dpll_id));
3316 	if (!port_dpll->pll) {
3317 		ret = -EINVAL;
3318 		goto err_unreference_tbt_pll;
3319 	}
3320 	intel_reference_shared_dpll(state, crtc,
3321 				    port_dpll->pll, &port_dpll->hw_state);
3322 
3323 	icl_update_active_dpll(state, crtc, encoder);
3324 
3325 	return 0;
3326 
3327 err_unreference_tbt_pll:
3328 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3329 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3330 
3331 	return ret;
3332 }
3333 
3334 static int icl_compute_dplls(struct intel_atomic_state *state,
3335 			     struct intel_crtc *crtc,
3336 			     struct intel_encoder *encoder)
3337 {
3338 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3339 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3340 
3341 	if (intel_phy_is_combo(dev_priv, phy))
3342 		return icl_compute_combo_phy_dpll(state, crtc);
3343 	else if (intel_phy_is_tc(dev_priv, phy))
3344 		return icl_compute_tc_phy_dplls(state, crtc);
3345 
3346 	MISSING_CASE(phy);
3347 
3348 	return 0;
3349 }
3350 
3351 static int icl_get_dplls(struct intel_atomic_state *state,
3352 			 struct intel_crtc *crtc,
3353 			 struct intel_encoder *encoder)
3354 {
3355 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3356 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3357 
3358 	if (intel_phy_is_combo(dev_priv, phy))
3359 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3360 	else if (intel_phy_is_tc(dev_priv, phy))
3361 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3362 
3363 	MISSING_CASE(phy);
3364 
3365 	return -EINVAL;
3366 }
3367 
3368 static void icl_put_dplls(struct intel_atomic_state *state,
3369 			  struct intel_crtc *crtc)
3370 {
3371 	const struct intel_crtc_state *old_crtc_state =
3372 		intel_atomic_get_old_crtc_state(state, crtc);
3373 	struct intel_crtc_state *new_crtc_state =
3374 		intel_atomic_get_new_crtc_state(state, crtc);
3375 	enum icl_port_dpll_id id;
3376 
3377 	new_crtc_state->shared_dpll = NULL;
3378 
3379 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3380 		const struct icl_port_dpll *old_port_dpll =
3381 			&old_crtc_state->icl_port_dplls[id];
3382 		struct icl_port_dpll *new_port_dpll =
3383 			&new_crtc_state->icl_port_dplls[id];
3384 
3385 		new_port_dpll->pll = NULL;
3386 
3387 		if (!old_port_dpll->pll)
3388 			continue;
3389 
3390 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3391 	}
3392 }
3393 
/*
 * Read out the MG PHY PLL state (ICL type-C ports) into @hw_state.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise.
 * Reserved register bits are masked out of each field so the readout can
 * be compared against the software-computed state.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* The registers are only accessible with the display core powered. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	/* Nothing to read out if the PLL is disabled. */
	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * Generate the same refclk-dependent masks as the calc path
	 * (icl_calc_mg_pll_state()) so the state comparison ignores the
	 * bits that are not programmed for a 38.4 MHz refclk.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3459 
/*
 * Read out the Dekel PHY PLL state (TGL+ type-C ports) into @hw_state.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise.
 * Reserved register bits are masked out of each field so the readout can
 * be compared against the software-computed state.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* The registers are only accessible with the display core powered. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	/* Nothing to read out if the PLL is disabled. */
	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	/* Only keep the AFC startup bits when the VBT overrides them. */
	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3533 
/*
 * Read out the combo/TBT PLL state into @hw_state, using the platform
 * specific CFGCR register locations.
 *
 * Returns true and fills @hw_state if the PLL (as seen via @enable_reg)
 * is enabled, false otherwise.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* The registers are only accessible with the display core powered. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	/* Nothing to read out if the PLL is disabled. */
	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* The CFGCR register layout differs per platform generation. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only relevant when the VBT overrides AFC startup. */
		if (dev_priv->vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* On JSL/EHL the DPLL4 registers sit at the index-4 slot. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3592 
3593 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3594 				   struct intel_shared_dpll *pll,
3595 				   struct intel_dpll_hw_state *hw_state)
3596 {
3597 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3598 
3599 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3600 }
3601 
3602 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3603 				 struct intel_shared_dpll *pll,
3604 				 struct intel_dpll_hw_state *hw_state)
3605 {
3606 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3607 }
3608 
/*
 * Program the combo/TBT PLL CFGCR (and, where applicable, DIV0) registers
 * from the PLL's software state, using the platform specific register
 * locations.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	/* Pick the register locations matching icl_pll_get_hw_state(). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* On JSL/EHL the DPLL4 registers sit at the index-4 slot. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/*
	 * An AFC startup override without a DIV0 register would be a bug:
	 * warn, and only do the RMW when the register actually exists.
	 */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
			     hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3649 
/*
 * Program the MG PHY PLL registers (ICL type-C ports) from the PLL's
 * software state. A posting read at the end flushes the writes.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are written in full, no RMW needed. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC masks are refclk dependent, see icl_calc_mg_pll_state(). */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3700 
/*
 * Program the Dekel PHY PLL registers (TGL+ type-C ports) from the PLL's
 * software state. A posting read at the end flushes the writes.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* Only touch the AFC startup bits when the VBT overrides them. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
		     hw_state->mg_pll_div0);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3768 
3769 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3770 				 struct intel_shared_dpll *pll,
3771 				 i915_reg_t enable_reg)
3772 {
3773 	u32 val;
3774 
3775 	val = intel_de_read(dev_priv, enable_reg);
3776 	val |= PLL_POWER_ENABLE;
3777 	intel_de_write(dev_priv, enable_reg, val);
3778 
3779 	/*
3780 	 * The spec says we need to "wait" but it also says it should be
3781 	 * immediate.
3782 	 */
3783 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3784 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3785 			pll->info->id);
3786 }
3787 
3788 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3789 			   struct intel_shared_dpll *pll,
3790 			   i915_reg_t enable_reg)
3791 {
3792 	u32 val;
3793 
3794 	val = intel_de_read(dev_priv, enable_reg);
3795 	val |= PLL_ENABLE;
3796 	intel_de_write(dev_priv, enable_reg, val);
3797 
3798 	/* Timeout is actually 600us. */
3799 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3800 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3801 }
3802 
/*
 * Apply Wa_16011069516 on ADL-P A0: disable CMTG (DPT) clock gating.
 * Only relevant while DPLL0 is being enabled; a no-op everywhere else.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);	/* intentional double read, see above */
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3827 
/*
 * Full enable sequence for a combo PHY PLL: power up, program the
 * registers, then enable and wait for lock. The order of these steps
 * must be preserved.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain. The reference is released in combo_pll_disable().
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3861 
/*
 * Full enable sequence for the TBT PLL: power up, program the registers,
 * then enable and wait for lock. The order of these steps must be
 * preserved.
 */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3879 
/*
 * Full enable sequence for a type-C PHY PLL: power up, program the
 * MG (ICL) or Dekel (TGL+) registers, then enable and wait for lock.
 * The order of these steps must be preserved.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Dekel PHY on TGL+, MG PHY on ICL. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3902 
/*
 * Full disable sequence via @enable_reg: drop PLL_ENABLE, wait for the
 * lock bit to clear, then drop PLL_POWER_ENABLE and wait for the power
 * state to clear. The order of these steps must be preserved.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3939 
3940 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3941 			      struct intel_shared_dpll *pll)
3942 {
3943 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3944 
3945 	icl_pll_disable(dev_priv, pll, enable_reg);
3946 
3947 	if (IS_JSL_EHL(dev_priv) &&
3948 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3949 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3950 					pll->wakeref);
3951 }
3952 
3953 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3954 			    struct intel_shared_dpll *pll)
3955 {
3956 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3957 }
3958 
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	/* Type-C PLLs each have their own per-port enable register. */
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3966 
/* ICL+ DPLLs use the cdclk reference clock as their non-SSC reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3972 
3973 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3974 			      const struct intel_dpll_hw_state *hw_state)
3975 {
3976 	drm_dbg_kms(&dev_priv->drm,
3977 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3978 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3979 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3980 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3981 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3982 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3983 		    hw_state->cfgcr0, hw_state->cfgcr1,
3984 		    hw_state->div0,
3985 		    hw_state->mg_refclkin_ctl,
3986 		    hw_state->mg_clktop2_coreclkctl1,
3987 		    hw_state->mg_clktop2_hsclkctl,
3988 		    hw_state->mg_pll_div0,
3989 		    hw_state->mg_pll_div1,
3990 		    hw_state->mg_pll_lf,
3991 		    hw_state->mg_pll_frac_lock,
3992 		    hw_state->mg_pll_ssc,
3993 		    hw_state->mg_pll_bias,
3994 		    hw_state->mg_pll_tdc_coldst_bias);
3995 }
3996 
/* PLL hooks for the ICL+ combo PHY DPLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* PLL hooks for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* PLL hooks for the ICL Type-C (MG PHY) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4017 
/* Shared DPLLs on ICL: two combo PLLs, the TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4038 
/* Shared DPLLs on EHL/JSL: combo PLLs only (DPLL4 needs DC_OFF handling). */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4054 
/*
 * PLL hooks for TGL+ Type-C (Dekel PHY) PLLs; shares the MG enable/disable
 * paths but reads hw state via the DKL register layout.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4061 
/* Shared DPLLs on TGL: two combo PLLs, the TBT PLL and six Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4084 
/* Shared DPLLs on RKL: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4100 
/* Shared DPLLs on DG1: four combo PLLs with DG1-specific IDs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4117 
/* Shared DPLLs on ADL-S: combo PLLs, mixing ICL and DG1 PLL IDs. */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4134 
/* Shared DPLLs on ADL-P: two combo PLLs, the TBT PLL and four Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },	/* sentinel */
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4155 
/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @dev_priv: i915 device
 *
 * Initialize shared DPLLs for @dev_priv by selecting the platform's DPLL
 * manager and populating the shared_dplls array from its dpll_info table.
 * Platforms without shared DPLL support end up with num_shared_dpll == 0.
 */
void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
{
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	/*
	 * NOTE: the order of the checks matters — specific platform checks
	 * must come before the generic DISPLAY_VER() fallbacks that would
	 * otherwise match them.
	 */
	if (IS_DG2(dev_priv))
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (IS_ALDERLAKE_P(dev_priv))
		dpll_mgr = &adlp_pll_mgr;
	else if (IS_ALDERLAKE_S(dev_priv))
		dpll_mgr = &adls_pll_mgr;
	else if (IS_DG1(dev_priv))
		dpll_mgr = &dg1_pll_mgr;
	else if (IS_ROCKETLAKE(dev_priv))
		dpll_mgr = &rkl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_JSL_EHL(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (DISPLAY_VER(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (DISPLAY_VER(dev_priv) == 9)
		dpll_mgr = &skl_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	if (!dpll_mgr) {
		dev_priv->dpll.num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	/* The tables are laid out so that entry i has DPLL id i. */
	for (i = 0; dpll_info[i].name; i++) {
		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->dpll.mgr = dpll_mgr;
	dev_priv->dpll.num_shared_dpll = i;
	mutex_init(&dev_priv->dpll.lock);

	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
4212 
/**
 * intel_compute_shared_dplls - compute DPLL state for a CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to compute DPLLs for
 * @encoder: encoder
 *
 * This function computes the DPLL state for the given CRTC and encoder.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int intel_compute_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->compute_dplls(state, crtc, encoder);
}
4239 
4240 /**
4241  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4242  * @state: atomic state
4243  * @crtc: CRTC to reserve DPLLs for
4244  * @encoder: encoder
4245  *
4246  * This function reserves all required DPLLs for the given CRTC and encoder
4247  * combination in the current atomic commit @state and the new @crtc atomic
4248  * state.
4249  *
4250  * The new configuration in the atomic commit @state is made effective by
4251  * calling intel_shared_dpll_swap_state().
4252  *
4253  * The reserved DPLLs should be released by calling
4254  * intel_release_shared_dplls().
4255  *
4256  * Returns:
4257  * 0 if all required DPLLs were successfully reserved,
4258  * negative error code otherwise.
4259  */
4260 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4261 			       struct intel_crtc *crtc,
4262 			       struct intel_encoder *encoder)
4263 {
4264 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4265 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4266 
4267 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4268 		return -EINVAL;
4269 
4270 	return dpll_mgr->get_dplls(state, crtc, encoder);
4271 }
4272 
4273 /**
4274  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4275  * @state: atomic state
4276  * @crtc: crtc from which the DPLLs are to be released
4277  *
4278  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4279  * from the current atomic commit @state and the old @crtc atomic state.
4280  *
4281  * The new configuration in the atomic commit @state is made effective by
4282  * calling intel_shared_dpll_swap_state().
4283  */
4284 void intel_release_shared_dplls(struct intel_atomic_state *state,
4285 				struct intel_crtc *crtc)
4286 {
4287 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4288 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4289 
4290 	/*
4291 	 * FIXME: this function is called for every platform having a
4292 	 * compute_clock hook, even though the platform doesn't yet support
4293 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4294 	 * called on those.
4295 	 */
4296 	if (!dpll_mgr)
4297 		return;
4298 
4299 	dpll_mgr->put_dplls(state, crtc);
4300 }
4301 
4302 /**
4303  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4304  * @state: atomic state
4305  * @crtc: the CRTC for which to update the active DPLL
4306  * @encoder: encoder determining the type of port DPLL
4307  *
4308  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4309  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4310  * DPLL selected will be based on the current mode of the encoder's port.
4311  */
4312 void intel_update_active_dpll(struct intel_atomic_state *state,
4313 			      struct intel_crtc *crtc,
4314 			      struct intel_encoder *encoder)
4315 {
4316 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4317 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4318 
4319 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4320 		return;
4321 
4322 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4323 }
4324 
4325 /**
4326  * intel_dpll_get_freq - calculate the DPLL's output frequency
4327  * @i915: i915 device
4328  * @pll: DPLL for which to calculate the output frequency
4329  * @pll_state: DPLL state from which to calculate the output frequency
4330  *
4331  * Return the output frequency corresponding to @pll's passed in @pll_state.
4332  */
4333 int intel_dpll_get_freq(struct drm_i915_private *i915,
4334 			const struct intel_shared_dpll *pll,
4335 			const struct intel_dpll_hw_state *pll_state)
4336 {
4337 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4338 		return 0;
4339 
4340 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4341 }
4342 
4343 /**
4344  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4345  * @i915: i915 device
4346  * @pll: DPLL for which to calculate the output frequency
4347  * @hw_state: DPLL's hardware state
4348  *
4349  * Read out @pll's hardware state into @hw_state.
4350  */
4351 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4352 			     struct intel_shared_dpll *pll,
4353 			     struct intel_dpll_hw_state *hw_state)
4354 {
4355 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4356 }
4357 
/*
 * Read out a single PLL's hardware state and reconstruct the software
 * tracking (on/off, pipe usage) from the current CRTC states.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * Mirror the DC_OFF wakeref that the enable path takes for DPLL4 on
	 * JSL/EHL, so the disable path can release it.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Rebuild the pipe mask from the CRTCs currently using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4385 
4386 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4387 {
4388 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4389 		i915->dpll.mgr->update_ref_clks(i915);
4390 }
4391 
4392 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4393 {
4394 	int i;
4395 
4396 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4397 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4398 }
4399 
/*
 * Bring a PLL's hardware state in line with expectations: apply the ADL-P
 * CMTG clock gating workaround to any PLL that is on, and disable PLLs
 * that are enabled in hardware but unused by any pipe.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Applied before the active check: needed even for in-use PLLs. */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4418 
4419 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4420 {
4421 	int i;
4422 
4423 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4424 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4425 }
4426 
4427 /**
4428  * intel_dpll_dump_hw_state - write hw_state to dmesg
4429  * @dev_priv: i915 drm device
4430  * @hw_state: hw state to be written to the log
4431  *
4432  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4433  */
4434 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4435 			      const struct intel_dpll_hw_state *hw_state)
4436 {
4437 	if (dev_priv->dpll.mgr) {
4438 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4439 	} else {
4440 		/* fallback for platforms that don't use the shared dpll
4441 		 * infrastructure
4442 		 */
4443 		drm_dbg_kms(&dev_priv->drm,
4444 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4445 			    "fp0: 0x%x, fp1: 0x%x\n",
4446 			    hw_state->dpll,
4447 			    hw_state->dpll_md,
4448 			    hw_state->fp0,
4449 			    hw_state->fp1);
4450 	}
4451 }
4452 
/*
 * Cross-check one PLL's software tracking against its hardware state.
 * With @crtc/@new_crtc_state the per-pipe usage is verified as well;
 * with a NULL @crtc only the global consistency checks are done.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active consistency checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		/* Global check: no pipe may be active without a reference. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* The pipe must be in the active mask iff its CRTC is active. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* An enabled PLL's tracked hw state must match the readout exactly. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4507 
4508 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4509 				    struct intel_crtc_state *old_crtc_state,
4510 				    struct intel_crtc_state *new_crtc_state)
4511 {
4512 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4513 
4514 	if (new_crtc_state->shared_dpll)
4515 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4516 					 crtc, new_crtc_state);
4517 
4518 	if (old_crtc_state->shared_dpll &&
4519 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4520 		u8 pipe_mask = BIT(crtc->pipe);
4521 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4522 
4523 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4524 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4525 				pipe_name(crtc->pipe), pll->active_mask);
4526 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
4527 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4528 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4529 	}
4530 }
4531 
4532 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4533 {
4534 	int i;
4535 
4536 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4537 		verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i],
4538 					 NULL, NULL);
4539 }
4540