1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/string_helpers.h>
25 
26 #include "i915_reg.h"
27 #include "intel_de.h"
28 #include "intel_display_types.h"
29 #include "intel_dkl_phy.h"
30 #include "intel_dkl_phy_regs.h"
31 #include "intel_dpio_phy.h"
32 #include "intel_dpll.h"
33 #include "intel_dpll_mgr.h"
34 #include "intel_hti.h"
35 #include "intel_mg_phy_regs.h"
36 #include "intel_pch_refclk.h"
37 #include "intel_tc.h"
38 
39 /**
40  * DOC: Display PLLs
41  *
42  * Display PLLs used for driving outputs vary by platform. While some have
43  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
44  * from a pool. In the latter scenario, it is possible that multiple pipes
45  * share a PLL if their configurations match.
46  *
47  * This file provides an abstraction over display PLLs. The function
48  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
49  * users of a PLL are tracked and that tracking is integrated with the atomic
50  * modset interface. During an atomic operation, required PLLs can be reserved
51  * for a given CRTC and encoder configuration by calling
52  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
53  * with intel_release_shared_dplls().
54  * Changes to the users are first staged in the atomic state, and then made
55  * effective by calling intel_shared_dpll_swap_state() during the atomic
56  * commit phase.
57  */
58 
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true iff the pll is
	 * enabled in hardware; returns false (without filling @hw_state)
	 * when the required power domain is down.
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state. Implementations return the frequency in kHz; 0 is
	 * returned when the frequency cannot be determined from @pll_state.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};
93 
/*
 * Per-platform dispatch table for shared DPLL management. One static
 * instance exists per platform family (e.g. pch_pll_mgr below).
 */
struct intel_dpll_mgr {
	/* Table of this platform's PLLs, terminated by an empty entry. */
	const struct dpll_info *dpll_info;

	/* Compute the DPLL state for @crtc; returns 0 or a negative errno. */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* Reserve DPLL(s) for @crtc; returns 0 or a negative errno. */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* Release the DPLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Optional (e.g. not set for pch_pll_mgr): retarget the active DPLL. */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Optional: refresh the cached reference clock frequencies. */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Log the contents of @hw_state for debugging. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
112 
113 static void
114 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
115 				  struct intel_shared_dpll_state *shared_dpll)
116 {
117 	enum intel_dpll_id i;
118 
119 	/* Copy shared dpll state */
120 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
121 		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
122 
123 		shared_dpll[i] = pll->state;
124 	}
125 }
126 
127 static struct intel_shared_dpll_state *
128 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
129 {
130 	struct intel_atomic_state *state = to_intel_atomic_state(s);
131 
132 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
133 
134 	if (!state->dpll_set) {
135 		state->dpll_set = true;
136 
137 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
138 						  state->shared_dpll);
139 	}
140 
141 	return state->shared_dpll;
142 }
143 
144 /**
145  * intel_get_shared_dpll_by_id - get a DPLL given its id
146  * @dev_priv: i915 device instance
147  * @id: pll id
148  *
149  * Returns:
150  * A pointer to the DPLL with @id
151  */
152 struct intel_shared_dpll *
153 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
154 			    enum intel_dpll_id id)
155 {
156 	return &dev_priv->display.dpll.shared_dplls[id];
157 }
158 
/* For ILK+ */
/*
 * Warn if the PLL's enable state read back from the hardware does not
 * match the expected @state; a NULL @pll also triggers a warning.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Compare against the hardware readout, not the software tracking. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(cur_state));
}
177 
178 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
179 {
180 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
181 }
182 
183 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
184 {
185 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
186 }
187 
188 static i915_reg_t
189 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
190 			   struct intel_shared_dpll *pll)
191 {
192 	if (IS_DG1(i915))
193 		return DG1_DPLL_ENABLE(pll->info->id);
194 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
195 		return MG_PLL_ENABLE(0);
196 
197 	return ICL_DPLL_ENABLE(pll->info->id);
198 }
199 
200 static i915_reg_t
201 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
202 			struct intel_shared_dpll *pll)
203 {
204 	const enum intel_dpll_id id = pll->info->id;
205 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
206 
207 	if (IS_ALDERLAKE_P(i915))
208 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
209 
210 	return MG_PLL_ENABLE(tc_port);
211 }
212 
213 /**
214  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
215  * @crtc_state: CRTC, and its state, which has a shared DPLL
216  *
217  * Enable the shared DPLL used by @crtc.
218  */
219 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
220 {
221 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
222 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
223 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
224 	unsigned int pipe_mask = BIT(crtc->pipe);
225 	unsigned int old_mask;
226 
227 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
228 		return;
229 
230 	mutex_lock(&dev_priv->display.dpll.lock);
231 	old_mask = pll->active_mask;
232 
233 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
234 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
235 		goto out;
236 
237 	pll->active_mask |= pipe_mask;
238 
239 	drm_dbg_kms(&dev_priv->drm,
240 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
241 		    pll->info->name, pll->active_mask, pll->on,
242 		    crtc->base.base.id, crtc->base.name);
243 
244 	if (old_mask) {
245 		drm_WARN_ON(&dev_priv->drm, !pll->on);
246 		assert_shared_dpll_enabled(dev_priv, pll);
247 		goto out;
248 	}
249 	drm_WARN_ON(&dev_priv->drm, pll->on);
250 
251 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
252 	pll->info->funcs->enable(dev_priv, pll);
253 	pll->on = true;
254 
255 out:
256 	mutex_unlock(&dev_priv->display.dpll.lock);
257 }
258 
259 /**
260  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
261  * @crtc_state: CRTC, and its state, which has a shared DPLL
262  *
263  * Disable the shared DPLL used by @crtc.
264  */
265 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
266 {
267 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
268 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
269 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
270 	unsigned int pipe_mask = BIT(crtc->pipe);
271 
272 	/* PCH only available on ILK+ */
273 	if (DISPLAY_VER(dev_priv) < 5)
274 		return;
275 
276 	if (pll == NULL)
277 		return;
278 
279 	mutex_lock(&dev_priv->display.dpll.lock);
280 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
281 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
282 		     crtc->base.base.id, crtc->base.name))
283 		goto out;
284 
285 	drm_dbg_kms(&dev_priv->drm,
286 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
287 		    pll->info->name, pll->active_mask, pll->on,
288 		    crtc->base.base.id, crtc->base.name);
289 
290 	assert_shared_dpll_enabled(dev_priv, pll);
291 	drm_WARN_ON(&dev_priv->drm, !pll->on);
292 
293 	pll->active_mask &= ~pipe_mask;
294 	if (pll->active_mask)
295 		goto out;
296 
297 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
298 	pll->info->funcs->disable(dev_priv, pll);
299 	pll->on = false;
300 
301 out:
302 	mutex_unlock(&dev_priv->display.dpll.lock);
303 }
304 
305 static struct intel_shared_dpll *
306 intel_find_shared_dpll(struct intel_atomic_state *state,
307 		       const struct intel_crtc *crtc,
308 		       const struct intel_dpll_hw_state *pll_state,
309 		       unsigned long dpll_mask)
310 {
311 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
312 	struct intel_shared_dpll *pll, *unused_pll = NULL;
313 	struct intel_shared_dpll_state *shared_dpll;
314 	enum intel_dpll_id i;
315 
316 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
317 
318 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
319 
320 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
321 		pll = &dev_priv->display.dpll.shared_dplls[i];
322 
323 		/* Only want to check enabled timings first */
324 		if (shared_dpll[i].pipe_mask == 0) {
325 			if (!unused_pll)
326 				unused_pll = pll;
327 			continue;
328 		}
329 
330 		if (memcmp(pll_state,
331 			   &shared_dpll[i].hw_state,
332 			   sizeof(*pll_state)) == 0) {
333 			drm_dbg_kms(&dev_priv->drm,
334 				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
335 				    crtc->base.base.id, crtc->base.name,
336 				    pll->info->name,
337 				    shared_dpll[i].pipe_mask,
338 				    pll->active_mask);
339 			return pll;
340 		}
341 	}
342 
343 	/* Ok no matching timings, maybe there's a free one? */
344 	if (unused_pll) {
345 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
346 			    crtc->base.base.id, crtc->base.name,
347 			    unused_pll->info->name);
348 		return unused_pll;
349 	}
350 
351 	return NULL;
352 }
353 
354 static void
355 intel_reference_shared_dpll(struct intel_atomic_state *state,
356 			    const struct intel_crtc *crtc,
357 			    const struct intel_shared_dpll *pll,
358 			    const struct intel_dpll_hw_state *pll_state)
359 {
360 	struct drm_i915_private *i915 = to_i915(state->base.dev);
361 	struct intel_shared_dpll_state *shared_dpll;
362 	const enum intel_dpll_id id = pll->info->id;
363 
364 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
365 
366 	if (shared_dpll[id].pipe_mask == 0)
367 		shared_dpll[id].hw_state = *pll_state;
368 
369 	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0);
370 
371 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
372 
373 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
374 		    crtc->base.base.id, crtc->base.name, pll->info->name);
375 }
376 
377 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
378 					  const struct intel_crtc *crtc,
379 					  const struct intel_shared_dpll *pll)
380 {
381 	struct drm_i915_private *i915 = to_i915(state->base.dev);
382 	struct intel_shared_dpll_state *shared_dpll;
383 	const enum intel_dpll_id id = pll->info->id;
384 
385 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
386 
387 	drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0);
388 
389 	shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe);
390 
391 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
392 		    crtc->base.base.id, crtc->base.name, pll->info->name);
393 }
394 
395 static void intel_put_dpll(struct intel_atomic_state *state,
396 			   struct intel_crtc *crtc)
397 {
398 	const struct intel_crtc_state *old_crtc_state =
399 		intel_atomic_get_old_crtc_state(state, crtc);
400 	struct intel_crtc_state *new_crtc_state =
401 		intel_atomic_get_new_crtc_state(state, crtc);
402 
403 	new_crtc_state->shared_dpll = NULL;
404 
405 	if (!old_crtc_state->shared_dpll)
406 		return;
407 
408 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
409 }
410 
411 /**
412  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
413  * @state: atomic state
414  *
415  * This is the dpll version of drm_atomic_helper_swap_state() since the
416  * helper does not handle driver-specific global state.
417  *
418  * For consistency with atomic helpers this function does a complete swap,
419  * i.e. it also puts the current state into @state, even though there is no
420  * need for that at this moment.
421  */
422 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
423 {
424 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
425 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
426 	enum intel_dpll_id i;
427 
428 	if (!state->dpll_set)
429 		return;
430 
431 	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
432 		struct intel_shared_dpll *pll =
433 			&dev_priv->display.dpll.shared_dplls[i];
434 
435 		swap(pll->state, shared_dpll[i]);
436 	}
437 }
438 
439 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
440 				      struct intel_shared_dpll *pll,
441 				      struct intel_dpll_hw_state *hw_state)
442 {
443 	const enum intel_dpll_id id = pll->info->id;
444 	intel_wakeref_t wakeref;
445 	u32 val;
446 
447 	wakeref = intel_display_power_get_if_enabled(dev_priv,
448 						     POWER_DOMAIN_DISPLAY_CORE);
449 	if (!wakeref)
450 		return false;
451 
452 	val = intel_de_read(dev_priv, PCH_DPLL(id));
453 	hw_state->dpll = val;
454 	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
455 	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
456 
457 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
458 
459 	return val & DPLL_VCO_ENABLE;
460 }
461 
462 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
463 {
464 	u32 val;
465 	bool enabled;
466 
467 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
468 
469 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
470 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
471 			    DREF_SUPERSPREAD_SOURCE_MASK));
472 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
473 }
474 
/*
 * Program and enable an IBX/CPT PCH DPLL from the precomputed hw state.
 * The write/delay/rewrite sequence below is order-sensitive.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Program the FP0/FP1 divisor registers before the DPLL control. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
501 
502 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
503 				 struct intel_shared_dpll *pll)
504 {
505 	const enum intel_dpll_id id = pll->info->id;
506 
507 	intel_de_write(dev_priv, PCH_DPLL(id), 0);
508 	intel_de_posting_read(dev_priv, PCH_DPLL(id));
509 	udelay(200);
510 }
511 
/* No platform-specific DPLL precomputation is needed; always succeeds. */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}
518 
519 static int ibx_get_dpll(struct intel_atomic_state *state,
520 			struct intel_crtc *crtc,
521 			struct intel_encoder *encoder)
522 {
523 	struct intel_crtc_state *crtc_state =
524 		intel_atomic_get_new_crtc_state(state, crtc);
525 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
526 	struct intel_shared_dpll *pll;
527 	enum intel_dpll_id i;
528 
529 	if (HAS_PCH_IBX(dev_priv)) {
530 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
531 		i = (enum intel_dpll_id) crtc->pipe;
532 		pll = &dev_priv->display.dpll.shared_dplls[i];
533 
534 		drm_dbg_kms(&dev_priv->drm,
535 			    "[CRTC:%d:%s] using pre-allocated %s\n",
536 			    crtc->base.base.id, crtc->base.name,
537 			    pll->info->name);
538 	} else {
539 		pll = intel_find_shared_dpll(state, crtc,
540 					     &crtc_state->dpll_hw_state,
541 					     BIT(DPLL_ID_PCH_PLL_B) |
542 					     BIT(DPLL_ID_PCH_PLL_A));
543 	}
544 
545 	if (!pll)
546 		return -EINVAL;
547 
548 	/* reference the pll */
549 	intel_reference_shared_dpll(state, crtc,
550 				    pll, &crtc_state->dpll_hw_state);
551 
552 	crtc_state->shared_dpll = pll;
553 
554 	return 0;
555 }
556 
557 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
558 			      const struct intel_dpll_hw_state *hw_state)
559 {
560 	drm_dbg_kms(&dev_priv->drm,
561 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
562 		    "fp0: 0x%x, fp1: 0x%x\n",
563 		    hw_state->dpll,
564 		    hw_state->dpll_md,
565 		    hw_state->fp0,
566 		    hw_state->fp1);
567 }
568 
/* Hooks shared by both PCH DPLLs */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs; the empty entry terminates the list. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for IBX/CPT PCH platforms */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
588 
/*
 * Enable a HSW/BDW WRPLL by writing the precomputed control word; the
 * posting read flushes the write before the fixed settling delay.
 */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
598 
/*
 * Enable the (single-instance) SPLL by writing the precomputed control
 * word; the posting read flushes it before the fixed settling delay.
 */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
606 
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Clear only the enable bit, leaving the divider config in place. */
	intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
622 
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;

	/* Clear only the enable bit, leaving the rest of the config intact. */
	intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
638 
639 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
640 				       struct intel_shared_dpll *pll,
641 				       struct intel_dpll_hw_state *hw_state)
642 {
643 	const enum intel_dpll_id id = pll->info->id;
644 	intel_wakeref_t wakeref;
645 	u32 val;
646 
647 	wakeref = intel_display_power_get_if_enabled(dev_priv,
648 						     POWER_DOMAIN_DISPLAY_CORE);
649 	if (!wakeref)
650 		return false;
651 
652 	val = intel_de_read(dev_priv, WRPLL_CTL(id));
653 	hw_state->wrpll = val;
654 
655 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
656 
657 	return val & WRPLL_PLL_ENABLE;
658 }
659 
660 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
661 				      struct intel_shared_dpll *pll,
662 				      struct intel_dpll_hw_state *hw_state)
663 {
664 	intel_wakeref_t wakeref;
665 	u32 val;
666 
667 	wakeref = intel_display_power_get_if_enabled(dev_priv,
668 						     POWER_DOMAIN_DISPLAY_CORE);
669 	if (!wakeref)
670 		return false;
671 
672 	val = intel_de_read(dev_priv, SPLL_CTL);
673 	hw_state->spll = val;
674 
675 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
676 
677 	return val & SPLL_PLL_ENABLE;
678 }
679 
/* LCPLL reference frequency, in MHz */
#define LC_FREQ 2700
/* LC_FREQ in the scaled units used by the WRPLL divider search below */
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (p) sweep bounds; only even values are tried */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
/* Reference input and VCO limits, in the same units as LC_FREQ */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/*
 * A candidate WRPLL divider triple: post divider p, feedback n2 and
 * reference r2, the latter two stored as 2x the real divider values
 * (see the R2 = 2 * R / N2 = 2 * N notes in hsw_ddi_calculate_wrpll()).
 */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
696 
/*
 * Return the PPM budget allowed for the WRPLL at the given pixel clock
 * (in Hz, see hsw_ddi_calculate_wrpll()). Well-known display clocks get
 * a zero budget (exact divider match required), a few awkward clocks get
 * a relaxed budget, and everything else defaults to 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{  25175000,    0 }, {  25200000,    0 }, {  27000000,    0 },
		{  27027000,    0 }, {  37762500,    0 }, {  37800000,    0 },
		{  40500000,    0 }, {  40541000,    0 }, {  54000000,    0 },
		{  54054000,    0 }, {  59341000,    0 }, {  59400000,    0 },
		{  72000000,    0 }, {  74176000,    0 }, {  74250000,    0 },
		{  81000000,    0 }, {  81081000,    0 }, {  89012000,    0 },
		{  89100000,    0 }, { 108000000,    0 }, { 108108000,    0 },
		{ 111264000,    0 }, { 111375000,    0 }, { 148352000,    0 },
		{ 148500000,    0 }, { 162000000,    0 }, { 162162000,    0 },
		{ 222525000,    0 }, { 222750000,    0 }, { 296703000,    0 },
		{ 297000000,    0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	size_t i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
760 
/*
 * Consider (r2, n2, p) as a divider candidate for the target frequency
 * @freq2k and update @best in place if it is an improvement, according
 * to the budget/tie-break rules described in the comment below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/*
	 * The comparisons below are the delta <= budget inequality with the
	 * division cleared: a/b are budget * freq2k scaled by the candidate's
	 * (resp. best's) p * r2, c/d are the corresponding absolute errors.
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
820 
821 static void
822 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
823 			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
824 {
825 	u64 freq2k;
826 	unsigned p, n2, r2;
827 	struct hsw_wrpll_rnp best = {};
828 	unsigned budget;
829 
830 	freq2k = clock / 100;
831 
832 	budget = hsw_wrpll_get_budget_for_freq(clock);
833 
834 	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
835 	 * and directly pass the LC PLL to it. */
836 	if (freq2k == 5400000) {
837 		*n2_out = 2;
838 		*p_out = 1;
839 		*r2_out = 2;
840 		return;
841 	}
842 
843 	/*
844 	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
845 	 * the WR PLL.
846 	 *
847 	 * We want R so that REF_MIN <= Ref <= REF_MAX.
848 	 * Injecting R2 = 2 * R gives:
849 	 *   REF_MAX * r2 > LC_FREQ * 2 and
850 	 *   REF_MIN * r2 < LC_FREQ * 2
851 	 *
852 	 * Which means the desired boundaries for r2 are:
853 	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
854 	 *
855 	 */
856 	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
857 	     r2 <= LC_FREQ * 2 / REF_MIN;
858 	     r2++) {
859 
860 		/*
861 		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
862 		 *
863 		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
864 		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
865 		 *   VCO_MAX * r2 > n2 * LC_FREQ and
866 		 *   VCO_MIN * r2 < n2 * LC_FREQ)
867 		 *
868 		 * Which means the desired boundaries for n2 are:
869 		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
870 		 */
871 		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
872 		     n2 <= VCO_MAX * r2 / LC_FREQ;
873 		     n2++) {
874 
875 			for (p = P_MIN; p <= P_MAX; p += P_INC)
876 				hsw_wrpll_update_rnp(freq2k, budget,
877 						     r2, n2, p, &best);
878 		}
879 	}
880 
881 	*n2_out = best.n2;
882 	*p_out = best.p;
883 	*r2_out = best.r2;
884 }
885 
/*
 * Calculate the WRPLL output frequency, in kHz, from the reference-select
 * and divider fields of @pll_state->wrpll. Returns 0 for an unknown
 * reference selection.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->display.dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->display.dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		/* Fixed 2.7 GHz LCPLL reference, in kHz. */
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
925 
926 static int
927 hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
928 			   struct intel_crtc *crtc)
929 {
930 	struct drm_i915_private *i915 = to_i915(state->base.dev);
931 	struct intel_crtc_state *crtc_state =
932 		intel_atomic_get_new_crtc_state(state, crtc);
933 	unsigned int p, n2, r2;
934 
935 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
936 
937 	crtc_state->dpll_hw_state.wrpll =
938 		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
939 		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
940 		WRPLL_DIVIDER_POST(p);
941 
942 	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
943 							&crtc_state->dpll_hw_state);
944 
945 	return 0;
946 }
947 
948 static struct intel_shared_dpll *
949 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
950 		       struct intel_crtc *crtc)
951 {
952 	struct intel_crtc_state *crtc_state =
953 		intel_atomic_get_new_crtc_state(state, crtc);
954 
955 	return intel_find_shared_dpll(state, crtc,
956 				      &crtc_state->dpll_hw_state,
957 				      BIT(DPLL_ID_WRPLL2) |
958 				      BIT(DPLL_ID_WRPLL1));
959 }
960 
961 static int
962 hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
963 {
964 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
965 	int clock = crtc_state->port_clock;
966 
967 	switch (clock / 2) {
968 	case 81000:
969 	case 135000:
970 	case 270000:
971 		return 0;
972 	default:
973 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
974 			    clock);
975 		return -EINVAL;
976 	}
977 }
978 
979 static struct intel_shared_dpll *
980 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
981 {
982 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
983 	struct intel_shared_dpll *pll;
984 	enum intel_dpll_id pll_id;
985 	int clock = crtc_state->port_clock;
986 
987 	switch (clock / 2) {
988 	case 81000:
989 		pll_id = DPLL_ID_LCPLL_810;
990 		break;
991 	case 135000:
992 		pll_id = DPLL_ID_LCPLL_1350;
993 		break;
994 	case 270000:
995 		pll_id = DPLL_ID_LCPLL_2700;
996 		break;
997 	default:
998 		MISSING_CASE(clock / 2);
999 		return NULL;
1000 	}
1001 
1002 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
1003 
1004 	if (!pll)
1005 		return NULL;
1006 
1007 	return pll;
1008 }
1009 
1010 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1011 				  const struct intel_shared_dpll *pll,
1012 				  const struct intel_dpll_hw_state *pll_state)
1013 {
1014 	int link_clock = 0;
1015 
1016 	switch (pll->info->id) {
1017 	case DPLL_ID_LCPLL_810:
1018 		link_clock = 81000;
1019 		break;
1020 	case DPLL_ID_LCPLL_1350:
1021 		link_clock = 135000;
1022 		break;
1023 	case DPLL_ID_LCPLL_2700:
1024 		link_clock = 270000;
1025 		break;
1026 	default:
1027 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1028 		break;
1029 	}
1030 
1031 	return link_clock * 2;
1032 }
1033 
/*
 * Compute the SPLL state for an analog (CRT) output. Only a 270000 kHz port
 * clock (clock / 2 == 135000) is supported; anything else is rejected.
 */
static int
hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return -EINVAL;

	/* Enable the SPLL at 1350 MHz with the muxed SSC reference. */
	crtc_state->dpll_hw_state.spll =
		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

	return 0;
}
1049 
1050 static struct intel_shared_dpll *
1051 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1052 		      struct intel_crtc *crtc)
1053 {
1054 	struct intel_crtc_state *crtc_state =
1055 		intel_atomic_get_new_crtc_state(state, crtc);
1056 
1057 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1058 				      BIT(DPLL_ID_SPLL));
1059 }
1060 
1061 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1062 				 const struct intel_shared_dpll *pll,
1063 				 const struct intel_dpll_hw_state *pll_state)
1064 {
1065 	int link_clock = 0;
1066 
1067 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1068 	case SPLL_FREQ_810MHz:
1069 		link_clock = 81000;
1070 		break;
1071 	case SPLL_FREQ_1350MHz:
1072 		link_clock = 135000;
1073 		break;
1074 	case SPLL_FREQ_2700MHz:
1075 		link_clock = 270000;
1076 		break;
1077 	default:
1078 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1079 		break;
1080 	}
1081 
1082 	return link_clock * 2;
1083 }
1084 
1085 static int hsw_compute_dpll(struct intel_atomic_state *state,
1086 			    struct intel_crtc *crtc,
1087 			    struct intel_encoder *encoder)
1088 {
1089 	struct intel_crtc_state *crtc_state =
1090 		intel_atomic_get_new_crtc_state(state, crtc);
1091 
1092 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1093 		return hsw_ddi_wrpll_compute_dpll(state, crtc);
1094 	else if (intel_crtc_has_dp_encoder(crtc_state))
1095 		return hsw_ddi_lcpll_compute_dpll(crtc_state);
1096 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1097 		return hsw_ddi_spll_compute_dpll(state, crtc);
1098 	else
1099 		return -EINVAL;
1100 }
1101 
/*
 * Select and reference the shared DPLL for the CRTC based on its output
 * type: WRPLL for HDMI, LCPLL for DP, SPLL for analog (CRT).
 * Returns -EINVAL if no suitable PLL is available.
 */
static int hsw_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll = NULL;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);

	/* NULL here means no free PLL or an unsupported output type. */
	if (!pll)
		return -EINVAL;

	/* Record the new reference in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
1127 
1128 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1129 {
1130 	i915->display.dpll.ref_clks.ssc = 135000;
1131 	/* Non-SSC is only used on non-ULT HSW. */
1132 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1133 		i915->display.dpll.ref_clks.nssc = 24000;
1134 	else
1135 		i915->display.dpll.ref_clks.nssc = 135000;
1136 }
1137 
/* Dump the HSW PLL hardware state (WRPLL/SPLL control words) for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1144 
/* Ops for the two HSW WRPLLs (HDMI outputs). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1151 
/* Ops for the single HSW SPLL (analog/CRT output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1158 
/*
 * The LCPLLs are fixed-frequency and flagged INTEL_DPLL_ALWAYS_ON in
 * hsw_plls below, so enable/disable are intentionally no-ops and
 * get_hw_state unconditionally reports them as enabled.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}

/* Ops for the always-on HSW LCPLLs (DP outputs). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1182 
/* All shared DPLLs available on HSW; the LCPLLs are never turned off. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};

/* HSW platform hooks plugged into the shared DPLL framework. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1201 
/* Per-PLL register set on SKL: control + the two HDMI config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1232 
/*
 * Program this PLL's field group in the shared DPLL_CTRL1 register.
 * Each PLL owns a 6-bit field, hence the "id * 6" shift of the
 * pre-shifted ctrl1 value (the per-id mask macros use the same stride).
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
		     pll->state.hw_state.ctrl1 << (id * 6));
	/* Posting read to flush the write before the caller proceeds. */
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1243 
/*
 * Enable a SKL shared DPLL (1-3): program CTRL1 and the CFGCR registers,
 * set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	/* Program the DCO/divider config, flushed via posting reads. */
	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the lock indication. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1263 
/*
 * DPLL0 is always enabled (it drives CDCLK, see the get_hw_state below);
 * only its CTRL1 link-rate field needs programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1269 
/* Disable a SKL shared DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}

/* DPLL0 drives CDCLK and must never be turned off: disabling is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1285 
/*
 * Read back the hardware state of a SKL shared DPLL (1-3).
 * Returns false if display power is off or the PLL is disabled;
 * otherwise fills ctrl1 (and, in HDMI mode, cfgcr1/cfgcr2).
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's 6-bit field from the shared CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1322 
/*
 * Read back the hardware state of DPLL0. Only ctrl1 is read: DPLL0 does
 * not support HDMI mode, so there are no CFGCR registers to sample.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* Extract this PLL's 6-bit field from the shared CTRL1 register. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1355 
/* Running best-candidate state for the WRPLL divider search below. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/*
 * DCO freq must be within +1%/-6% of the DCO central freq.
 * Limits are expressed in units of 0.01% (see skl_wrpll_try_divider()).
 */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600
1366 
1367 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1368 				  u64 central_freq,
1369 				  u64 dco_freq,
1370 				  unsigned int divider)
1371 {
1372 	u64 deviation;
1373 
1374 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1375 			      central_freq);
1376 
1377 	/* positive deviation */
1378 	if (dco_freq >= central_freq) {
1379 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1380 		    deviation < ctx->min_deviation) {
1381 			ctx->min_deviation = deviation;
1382 			ctx->central_freq = central_freq;
1383 			ctx->dco_freq = dco_freq;
1384 			ctx->p = divider;
1385 		}
1386 	/* negative deviation */
1387 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1388 		   deviation < ctx->min_deviation) {
1389 		ctx->min_deviation = deviation;
1390 		ctx->central_freq = central_freq;
1391 		ctx->dco_freq = dco_freq;
1392 		ctx->p = divider;
1393 	}
1394 }
1395 
/*
 * Decompose the overall divider @p into the hardware's P0 (pdiv),
 * P1 (qdiv) and P2 (kdiv) factors such that p == p0 * p1 * p2.
 * Outputs are left untouched if @p is not a supported divider.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* Small halves map directly to P2. */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* Otherwise factor the half through P0/P1 with P2 = 2. */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
		} else {
			return; /* unsupported even divider */
		}
		*p2 = 2;
		return;
	}

	/* Odd dividers: 3, 5, 7, 9, 15, 21, 35. */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1444 
/* Raw field values for the SKL WRPLL CFGCR1/CFGCR2 registers. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional part of the DCO ratio */
	u32 dco_integer;	/* integer part of the DCO ratio */
	u32 qdiv_ratio;		/* P1 */
	u32 qdiv_mode;		/* 1 if qdiv_ratio != 1 */
	u32 kdiv;		/* encoded P2 */
	u32 pdiv;		/* encoded P0 */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1454 
/*
 * Translate the chosen DCO central frequency and P0/P1/P2 dividers into the
 * encoded register field values. Assumes @central_freq is one of the three
 * frequencies from skl_ddi_calculate_wrpll()'s dco_central_freq[] table and
 * that p0/p2 come from skl_wrpll_get_multipliers().
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	/* DCO frequency equals the AFE clock times the overall divider. */
	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* Fractional part of the ratio, scaled to a 15-bit field (0x8000). */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1522 
1523 static int
1524 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1525 			int ref_clock,
1526 			struct skl_wrpll_params *wrpll_params)
1527 {
1528 	static const u64 dco_central_freq[3] = { 8400000000ULL,
1529 						 9000000000ULL,
1530 						 9600000000ULL };
1531 	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1532 					    24, 28, 30, 32, 36, 40, 42, 44,
1533 					    48, 52, 54, 56, 60, 64, 66, 68,
1534 					    70, 72, 76, 78, 80, 84, 88, 90,
1535 					    92, 96, 98 };
1536 	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1537 	static const struct {
1538 		const u8 *list;
1539 		int n_dividers;
1540 	} dividers[] = {
1541 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1542 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1543 	};
1544 	struct skl_wrpll_context ctx = {
1545 		.min_deviation = U64_MAX,
1546 	};
1547 	unsigned int dco, d, i;
1548 	unsigned int p0, p1, p2;
1549 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1550 
1551 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1552 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1553 			for (i = 0; i < dividers[d].n_dividers; i++) {
1554 				unsigned int p = dividers[d].list[i];
1555 				u64 dco_freq = p * afe_clock;
1556 
1557 				skl_wrpll_try_divider(&ctx,
1558 						      dco_central_freq[dco],
1559 						      dco_freq,
1560 						      p);
1561 				/*
1562 				 * Skip the remaining dividers if we're sure to
1563 				 * have found the definitive divider, we can't
1564 				 * improve a 0 deviation.
1565 				 */
1566 				if (ctx.min_deviation == 0)
1567 					goto skip_remaining_dividers;
1568 			}
1569 		}
1570 
1571 skip_remaining_dividers:
1572 		/*
1573 		 * If a solution is found with an even divider, prefer
1574 		 * this one.
1575 		 */
1576 		if (d == 0 && ctx.p)
1577 			break;
1578 	}
1579 
1580 	if (!ctx.p)
1581 		return -EINVAL;
1582 
1583 	/*
1584 	 * gcc incorrectly analyses that these can be used without being
1585 	 * initialized. To be fair, it's hard to guess.
1586 	 */
1587 	p0 = p1 = p2 = 0;
1588 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1589 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1590 				  ctx.central_freq, p0, p1, p2);
1591 
1592 	return 0;
1593 }
1594 
/*
 * Compute the port clock (in kHz) from a WRPLL's CFGCR1/CFGCR2 state by
 * decoding the P0/P1/P2 dividers and the DCO ratio. Returns 0 on invalid
 * register state.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* P1 (qdiv) only applies when qdiv mode is enabled. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency: integer part plus the 15-bit fractional part. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* Invert AFE clock = 5x pixel clock and the overall divider. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1665 
/*
 * Compute CTRL1/CFGCR1/CFGCR2 for an HDMI output and store them in the CRTC
 * state. Also writes back the actually achievable frequency into
 * crtc_state->port_clock. Returns a negative error code if no WRPLL divider
 * fits the requested clock.
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz; the calculator wants Hz. */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	/* Replace the requested clock with the one the PLL will produce. */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}
1705 
/*
 * Encode the DP link rate into the CTRL1 value stored in the CRTC state.
 * Unrecognized clocks leave only the override bit set.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1742 
/*
 * Decode the link rate field of a (pre-shifted) CTRL1 value into the port
 * clock in kHz. Returns 0 for an unknown rate.
 */
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
	case DPLL_CTRL1_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	/* Port clock is twice the link clock. */
	return link_clock * 2;
}
1776 
1777 static int skl_compute_dpll(struct intel_atomic_state *state,
1778 			    struct intel_crtc *crtc,
1779 			    struct intel_encoder *encoder)
1780 {
1781 	struct intel_crtc_state *crtc_state =
1782 		intel_atomic_get_new_crtc_state(state, crtc);
1783 
1784 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1785 		return skl_ddi_hdmi_pll_dividers(crtc_state);
1786 	else if (intel_crtc_has_dp_encoder(crtc_state))
1787 		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1788 	else
1789 		return -EINVAL;
1790 }
1791 
/*
 * Pick a shared DPLL on SKL: eDP is restricted to the always-on DPLL0,
 * everything else shares DPLL1-3. References the chosen PLL in the atomic
 * state. Returns -EINVAL if none is available.
 */
static int skl_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return -EINVAL;

	/* Record the new reference in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
1820 
1821 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1822 				const struct intel_shared_dpll *pll,
1823 				const struct intel_dpll_hw_state *pll_state)
1824 {
1825 	/*
1826 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1827 	 * the internal shift for each field
1828 	 */
1829 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1830 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1831 	else
1832 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1833 }
1834 
/* The SKL DPLL reference is the CDCLK reference; there is no SSC source. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}

/* Dump the SKL PLL hardware state (CTRL1/CFGCR words) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1850 
/* Ops for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* Ops for the always-on DPLL0 (drives CDCLK; CTRL1 programming only). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* All shared DPLLs available on SKL. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};

/* SKL platform hooks plugged into the shared DPLL framework. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1881 
/*
 * Enable a BXT/GLK port PLL: power it up (GLK only), program all divider,
 * coefficient and calibration registers from the cached hw_state, trigger a
 * recalibration, enable the PLL, wait for lock, and finally program the
 * lane staggering in the PCS.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);

	/* Write M2 integer */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);

	/* Write N */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);

	/* Write M2 fraction */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);

	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	/* 200 us timeout for the lock indication. */
	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1982 
/* Disable a BXT/GLK port PLL; on GLK also power the PLL back down. */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		/* 200 us timeout for the power-down indication. */
		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2001 
/*
 * Read back the current hardware state of a BXT/GLK DDI PHY PLL.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise
 * (also when the display power well is down). Each register value is
 * masked down to just the bits this driver programs, so the result can be
 * compared against a software-computed state.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Bail out without touching hw if display power is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2081 
/* pre-calculated values for DP linkrates */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	/* entry [0] also serves as the fallback for unknown link rates */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
2093 
2094 static int
2095 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2096 			  struct dpll *clk_div)
2097 {
2098 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2099 
2100 	/* Calculate HDMI div */
2101 	/*
2102 	 * FIXME: tie the following calculation into
2103 	 * i9xx_crtc_compute_clock
2104 	 */
2105 	if (!bxt_find_best_dpll(crtc_state, clk_div))
2106 		return -EINVAL;
2107 
2108 	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2109 
2110 	return 0;
2111 }
2112 
2113 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2114 				    struct dpll *clk_div)
2115 {
2116 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2117 	int i;
2118 
2119 	*clk_div = bxt_dp_clk_val[0];
2120 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2121 		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2122 			*clk_div = bxt_dp_clk_val[i];
2123 			break;
2124 		}
2125 	}
2126 
2127 	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2128 
2129 	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
2130 		    clk_div->dot != crtc_state->port_clock);
2131 }
2132 
/*
 * Translate the computed clock dividers into the BXT PLL register values
 * stored in crtc_state->dpll_hw_state. Loop-filter coefficients and the
 * target count are chosen per VCO frequency band; the lane stagger value
 * is chosen per port clock. Returns 0 on success, -EINVAL for a VCO
 * outside the supported bands.
 */
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	/* Loop filter tuning per VCO band; 5400000 exactly gets its own set. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return -EINVAL;
	}

	/* Lane stagger delay scales down with increasing port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* m2 is .22 fixed point: integer part in pll0, fraction in pll2. */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);

	/* Only enable fractional feedback when the fraction is non-zero. */
	if (clk_div->m2 & 0x3fffff)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);

	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);

	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return 0;
}
2200 
/*
 * Compute the port clock (in kHz) from a BXT PLL hw state by decoding the
 * divider fields and re-running the CHV/BXT dpll math. @pll is unused.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	clock.m1 = 2;
	/* m2 is .22 fixed point: integer from pll0, fraction (if enabled) from pll2 */
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);

	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
}
2217 
2218 static int
2219 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2220 {
2221 	struct dpll clk_div = {};
2222 
2223 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2224 
2225 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2226 }
2227 
2228 static int
2229 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2230 {
2231 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2232 	struct dpll clk_div = {};
2233 	int ret;
2234 
2235 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2236 
2237 	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2238 	if (ret)
2239 		return ret;
2240 
2241 	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
2242 						      &crtc_state->dpll_hw_state);
2243 
2244 	return 0;
2245 }
2246 
2247 static int bxt_compute_dpll(struct intel_atomic_state *state,
2248 			    struct intel_crtc *crtc,
2249 			    struct intel_encoder *encoder)
2250 {
2251 	struct intel_crtc_state *crtc_state =
2252 		intel_atomic_get_new_crtc_state(state, crtc);
2253 
2254 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
2255 		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
2256 	else if (intel_crtc_has_dp_encoder(crtc_state))
2257 		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);
2258 	else
2259 		return -EINVAL;
2260 }
2261 
/*
 * Reserve the PLL for this crtc. On BXT there is no PLL sharing or
 * selection: each port owns a fixed PLL, so this only takes a reference
 * on the pre-determined PLL and records it in the crtc state.
 */
static int bxt_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}
2286 
/* Record the BXT PLL reference clocks: 100 MHz for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.ssc = 100000;
	i915->display.dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2293 
2294 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2295 			      const struct intel_dpll_hw_state *hw_state)
2296 {
2297 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2298 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2299 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2300 		    hw_state->ebb0,
2301 		    hw_state->ebb4,
2302 		    hw_state->pll0,
2303 		    hw_state->pll1,
2304 		    hw_state->pll2,
2305 		    hw_state->pll3,
2306 		    hw_state->pll6,
2307 		    hw_state->pll8,
2308 		    hw_state->pll9,
2309 		    hw_state->pll10,
2310 		    hw_state->pcsdw12);
2311 }
2312 
/* Hardware enable/disable/readback hooks for the BXT DDI PHY PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2319 
/* One fixed PLL per port (A/B/C); the list is NULL-terminated. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2326 
/* BXT/GLK DPLL manager: glue between the shared-dpll core and the hooks above. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2335 
/*
 * Decompose a total WRPLL post divider into the P/Q/K divider triple
 * (bestdiv == pdiv * qdiv * kdiv). Only the divider values produced by
 * icl_calc_wrpll()'s table are handled; for any other even value the
 * outputs are left untouched.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3, 5, 7 map directly; 9, 15, 21 use K = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers, K = 2 except for the bare divide-by-2 case */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2374 
/*
 * Encode logical P/Q/K dividers and the DCO frequency into the register
 * field values of @params. kdiv/pdiv use the hardware's non-linear
 * encodings; the DCO ratio is stored as a 15-bit fixed point fraction
 * split into dco_integer/dco_fraction.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv only takes effect together with kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* dco_freq / ref_freq in 15.15 fixed point, then split the two halves */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2422 
2423 /*
2424  * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2425  * Program half of the nominal DCO divider fraction value.
2426  */
2427 static bool
2428 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2429 {
2430 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2431 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2432 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2433 		 i915->display.dpll.ref_clks.nssc == 38400;
2434 }
2435 
/* Maps a DP link rate (kHz) to its pre-computed combo PHY WRPLL parameters. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2440 
2441 /*
2442  * These values alrea already adjusted: they're the bits we write to the
2443  * registers, not the logical values.
2444  */
/* Pre-computed combo PHY DP PLL values for a 24 MHz reference clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2471 
2472 
/* Also used for 38.4 MHz values. */
/* Pre-computed combo PHY DP PLL values for a 19.2 (or 38.4) MHz reference. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2500 
/* Thunderbolt PLL parameters, per reference clock (ICL and TGL+ variants). */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused (left zero-initialized) */
};
2521 
2522 static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2523 				 struct skl_wrpll_params *pll_params)
2524 {
2525 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2526 	const struct icl_combo_pll_params *params =
2527 		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2528 		icl_dp_combo_pll_24MHz_values :
2529 		icl_dp_combo_pll_19_2MHz_values;
2530 	int clock = crtc_state->port_clock;
2531 	int i;
2532 
2533 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2534 		if (clock == params[i].clock) {
2535 			*pll_params = params[i].wrpll;
2536 			return 0;
2537 		}
2538 	}
2539 
2540 	MISSING_CASE(clock);
2541 	return -EINVAL;
2542 }
2543 
2544 static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2545 			    struct skl_wrpll_params *pll_params)
2546 {
2547 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2548 
2549 	if (DISPLAY_VER(dev_priv) >= 12) {
2550 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2551 		default:
2552 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2553 			fallthrough;
2554 		case 19200:
2555 		case 38400:
2556 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2557 			break;
2558 		case 24000:
2559 			*pll_params = tgl_tbt_pll_24MHz_values;
2560 			break;
2561 		}
2562 	} else {
2563 		switch (dev_priv->display.dpll.ref_clks.nssc) {
2564 		default:
2565 			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2566 			fallthrough;
2567 		case 19200:
2568 		case 38400:
2569 			*pll_params = icl_tbt_pll_19_2MHz_values;
2570 			break;
2571 		case 24000:
2572 			*pll_params = icl_tbt_pll_24MHz_values;
2573 			break;
2574 		}
2575 	}
2576 
2577 	return 0;
2578 }
2579 
/*
 * Thunderbolt PLL "get_freq" hook. There is no single output frequency
 * to report, so this always returns 0 and warns if ever called.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2592 
2593 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2594 {
2595 	int ref_clock = i915->display.dpll.ref_clks.nssc;
2596 
2597 	/*
2598 	 * For ICL+, the spec states: if reference frequency is 38.4,
2599 	 * use 19.2 because the DPLL automatically divides that by 2.
2600 	 */
2601 	if (ref_clock == 38400)
2602 		ref_clock = 19200;
2603 
2604 	return ref_clock;
2605 }
2606 
/*
 * Compute WRPLL parameters for an HDMI port clock: search the allowed
 * post dividers for the DCO closest to mid-band, then decompose the
 * winning divider and encode everything into @wrpll_params. Returns 0
 * on success, -EINVAL if no divider puts the DCO in range.
 */
static int
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	/*
	 * Candidate dividers; ties on DCO centrality are broken by table
	 * order (even dividers first), so do not reorder this list.
	 */
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		/* prefer the in-range DCO closest to the middle of the band */
		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
		return -EINVAL;

	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return 0;
}
2650 
/*
 * Decode a combo PHY PLL hw state back into the port clock in kHz:
 * port_clock = DCO / (P0 * P1 * P2 * 5), with the DCO reconstructed
 * from the 15-bit fixed point integer/fraction fields.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv contributes only when qdiv_mode is set */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/*
	 * NOTE(review): no default case — a reserved PDIV/KDIV encoding
	 * leaves p0/p2 holding the raw field value; only zero is caught
	 * by the WARN below.
	 */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo WA #22010492432: the register holds half the nominal fraction */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2711 
/*
 * Encode computed WRPLL parameters into the combo PLL CFGCR0/CFGCR1
 * (and, with a VBT override, DIV0) register values in @pll_state.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/* WA #22010492432: program half of the nominal DCO fraction */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* the central frequency / clock select field differs per generation */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
2737 
/*
 * Search for MG/DKL PHY PLL dividers: find div1 (hsdiv) and div2 (dsdiv)
 * such that div1 * div2 * clock * 5 lands in the DCO range, preferring
 * larger div1 and larger div2. On success writes the resulting DCO to
 * @target_dco_khz and the clktop register values to @state, returning 0;
 * returns -EINVAL if no combination fits.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP requires exactly 8.1 GHz DCO; HDMI allows a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* translate div1 to its hsdiv register encoding */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}
2813 
2814 /*
2815  * The specification for this function uses real numbers, so the math had to be
2816  * adapted to integer-only calculation, that's why it looks so different.
2817  */
2818 static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2819 				 struct intel_dpll_hw_state *pll_state)
2820 {
2821 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2822 	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
2823 	int clock = crtc_state->port_clock;
2824 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2825 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2826 	u32 prop_coeff, int_coeff;
2827 	u32 tdc_targetcnt, feedfwgain;
2828 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2829 	u64 tmp;
2830 	bool use_ssc = false;
2831 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2832 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2833 	int ret;
2834 
2835 	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2836 				       pll_state, is_dkl);
2837 	if (ret)
2838 		return ret;
2839 
2840 	m1div = 2;
2841 	m2div_int = dco_khz / (refclk_khz * m1div);
2842 	if (m2div_int > 255) {
2843 		if (!is_dkl) {
2844 			m1div = 4;
2845 			m2div_int = dco_khz / (refclk_khz * m1div);
2846 		}
2847 
2848 		if (m2div_int > 255)
2849 			return -EINVAL;
2850 	}
2851 	m2div_rem = dco_khz % (refclk_khz * m1div);
2852 
2853 	tmp = (u64)m2div_rem * (1 << 22);
2854 	do_div(tmp, refclk_khz * m1div);
2855 	m2div_frac = tmp;
2856 
2857 	switch (refclk_khz) {
2858 	case 19200:
2859 		iref_ndiv = 1;
2860 		iref_trim = 28;
2861 		iref_pulse_w = 1;
2862 		break;
2863 	case 24000:
2864 		iref_ndiv = 1;
2865 		iref_trim = 25;
2866 		iref_pulse_w = 2;
2867 		break;
2868 	case 38400:
2869 		iref_ndiv = 2;
2870 		iref_trim = 28;
2871 		iref_pulse_w = 1;
2872 		break;
2873 	default:
2874 		MISSING_CASE(refclk_khz);
2875 		return -EINVAL;
2876 	}
2877 
2878 	/*
2879 	 * tdc_res = 0.000003
2880 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2881 	 *
2882 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2883 	 * was supposed to be a division, but we rearranged the operations of
2884 	 * the formula to avoid early divisions so we don't multiply the
2885 	 * rounding errors.
2886 	 *
2887 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2888 	 * we also rearrange to work with integers.
2889 	 *
2890 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2891 	 * last division by 10.
2892 	 */
2893 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2894 
2895 	/*
2896 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2897 	 * 32 bits. That's not a problem since we round the division down
2898 	 * anyway.
2899 	 */
2900 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2901 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2902 
2903 	if (dco_khz >= 9000000) {
2904 		prop_coeff = 5;
2905 		int_coeff = 10;
2906 	} else {
2907 		prop_coeff = 4;
2908 		int_coeff = 8;
2909 	}
2910 
2911 	if (use_ssc) {
2912 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2913 		do_div(tmp, refclk_khz * m1div * 10000);
2914 		ssc_stepsize = tmp;
2915 
2916 		tmp = mul_u32_u32(dco_khz, 1000);
2917 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2918 	} else {
2919 		ssc_stepsize = 0;
2920 		ssc_steplen = 0;
2921 	}
2922 	ssc_steplog = 4;
2923 
2924 	/* write pll_state calculations */
2925 	if (is_dkl) {
2926 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2927 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2928 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2929 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2930 		if (dev_priv->display.vbt.override_afc_startup) {
2931 			u8 val = dev_priv->display.vbt.override_afc_startup_val;
2932 
2933 			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
2934 		}
2935 
2936 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2937 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2938 
2939 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2940 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2941 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2942 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2943 
2944 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2945 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2946 
2947 		pll_state->mg_pll_tdc_coldst_bias =
2948 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2949 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2950 
2951 	} else {
2952 		pll_state->mg_pll_div0 =
2953 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2954 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2955 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2956 
2957 		pll_state->mg_pll_div1 =
2958 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2959 			MG_PLL_DIV1_DITHER_DIV_2 |
2960 			MG_PLL_DIV1_NDIVRATIO(1) |
2961 			MG_PLL_DIV1_FBPREDIV(m1div);
2962 
2963 		pll_state->mg_pll_lf =
2964 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2965 			MG_PLL_LF_AFCCNTSEL_512 |
2966 			MG_PLL_LF_GAINCTRL(1) |
2967 			MG_PLL_LF_INT_COEFF(int_coeff) |
2968 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2969 
2970 		pll_state->mg_pll_frac_lock =
2971 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2972 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2973 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2974 			MG_PLL_FRAC_LOCK_DCODITHEREN |
2975 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2976 		if (use_ssc || m2div_rem > 0)
2977 			pll_state->mg_pll_frac_lock |=
2978 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2979 
2980 		pll_state->mg_pll_ssc =
2981 			(use_ssc ? MG_PLL_SSC_EN : 0) |
2982 			MG_PLL_SSC_TYPE(2) |
2983 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2984 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
2985 			MG_PLL_SSC_FLLEN |
2986 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2987 
2988 		pll_state->mg_pll_tdc_coldst_bias =
2989 			MG_PLL_TDC_COLDST_COLDSTART |
2990 			MG_PLL_TDC_COLDST_IREFINT_EN |
2991 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2992 			MG_PLL_TDC_TDCOVCCORR_EN |
2993 			MG_PLL_TDC_TDCSEL(3);
2994 
2995 		pll_state->mg_pll_bias =
2996 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
2997 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2998 			MG_PLL_BIAS_BIAS_BONUS(10) |
2999 			MG_PLL_BIAS_BIASCAL_EN |
3000 			MG_PLL_BIAS_CTRIM(12) |
3001 			MG_PLL_BIAS_VREF_RDAC(4) |
3002 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3003 
3004 		if (refclk_khz == 38400) {
3005 			pll_state->mg_pll_tdc_coldst_bias_mask =
3006 				MG_PLL_TDC_COLDST_COLDSTART;
3007 			pll_state->mg_pll_bias_mask = 0;
3008 		} else {
3009 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3010 			pll_state->mg_pll_bias_mask = -1U;
3011 		}
3012 
3013 		pll_state->mg_pll_tdc_coldst_bias &=
3014 			pll_state->mg_pll_tdc_coldst_bias_mask;
3015 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3016 	}
3017 
3018 	return 0;
3019 }
3020 
/*
 * Calculate the output frequency (in kHz) of an MG (ICL) or DKL (TGL+) TC
 * PHY PLL from its register state.
 *
 * Both PHY generations use the same formula:
 *   out = ref * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2)
 * but the divider fields live in different registers.
 *
 * Returns the port clock in kHz, or 0 if the HSDIV ratio field holds an
 * unknown encoding.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	/* TC PHY PLLs always run off the non-SSC reference. */
	ref_clock = dev_priv->display.dpll.ref_clks.nssc;

	if (DISPLAY_VER(dev_priv) >= 12) {
		/* DKL PHY: feedback dividers in PLL_DIV0, fraction in PLL_BIAS. */
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* The fractional part only counts when fractional mode is on. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		/* MG PHY: feedback dividers in PLL_DIV0/PLL_DIV1. */
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* First post-divider is encoded as a discrete ratio, not a value. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	/* Second post-divider is a plain field value. */
	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3092 
3093 /**
3094  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3095  * @crtc_state: state for the CRTC to select the DPLL for
3096  * @port_dpll_id: the active @port_dpll_id to select
3097  *
3098  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3099  * CRTC.
3100  */
3101 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3102 			      enum icl_port_dpll_id port_dpll_id)
3103 {
3104 	struct icl_port_dpll *port_dpll =
3105 		&crtc_state->icl_port_dplls[port_dpll_id];
3106 
3107 	crtc_state->shared_dpll = port_dpll->pll;
3108 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3109 }
3110 
3111 static void icl_update_active_dpll(struct intel_atomic_state *state,
3112 				   struct intel_crtc *crtc,
3113 				   struct intel_encoder *encoder)
3114 {
3115 	struct intel_crtc_state *crtc_state =
3116 		intel_atomic_get_new_crtc_state(state, crtc);
3117 	struct intel_digital_port *primary_port;
3118 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3119 
3120 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3121 		enc_to_mst(encoder)->primary :
3122 		enc_to_dig_port(encoder);
3123 
3124 	if (primary_port &&
3125 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3126 	     intel_tc_port_in_legacy_mode(primary_port)))
3127 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3128 
3129 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3130 }
3131 
/*
 * Compute the DPLL state for a combo PHY port: WRPLL parameters for
 * HDMI/DSI outputs, the fixed DP parameter table otherwise, plus the
 * resulting port clock.
 *
 * Returns 0 on success or a negative error code from the calculation.
 */
static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct skl_wrpll_params pll_params = {};
	int ret;

	/* HDMI/DSI need dividers computed for the exact pixel rate. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (ret)
		return ret;

	/* Convert the divider parameters into CFGCR register values. */
	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* this is mainly for the fastset check */
	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);

	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
							    &port_dpll->hw_state);

	return 0;
}
3162 
/*
 * Reserve a shared DPLL for a combo PHY port. The pool of candidate DPLLs
 * depends on the platform (and on some platforms on the port); DPLLs
 * reserved by HTI (HDPORT) are excluded from consideration.
 *
 * Returns 0 on success, -EINVAL if no suitable DPLL could be found.
 */
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum port port = encoder->port;
	unsigned long dpll_mask;

	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* DG1 splits its four DPLLs between two port groups. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		/* On JSL/EHL, DPLL4 is available to all ports except A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_hti_dpll_mask(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll)
		return -EINVAL;

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return 0;
}
3221 
3222 static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
3223 				    struct intel_crtc *crtc)
3224 {
3225 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3226 	struct intel_crtc_state *crtc_state =
3227 		intel_atomic_get_new_crtc_state(state, crtc);
3228 	struct icl_port_dpll *port_dpll =
3229 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3230 	struct skl_wrpll_params pll_params = {};
3231 	int ret;
3232 
3233 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3234 	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3235 	if (ret)
3236 		return ret;
3237 
3238 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3239 
3240 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3241 	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3242 	if (ret)
3243 		return ret;
3244 
3245 	/* this is mainly for the fastset check */
3246 	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
3247 
3248 	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
3249 							 &port_dpll->hw_state);
3250 
3251 	return 0;
3252 }
3253 
3254 static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3255 				struct intel_crtc *crtc,
3256 				struct intel_encoder *encoder)
3257 {
3258 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3259 	struct intel_crtc_state *crtc_state =
3260 		intel_atomic_get_new_crtc_state(state, crtc);
3261 	struct icl_port_dpll *port_dpll =
3262 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3263 	enum intel_dpll_id dpll_id;
3264 	int ret;
3265 
3266 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3267 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3268 						&port_dpll->hw_state,
3269 						BIT(DPLL_ID_ICL_TBTPLL));
3270 	if (!port_dpll->pll)
3271 		return -EINVAL;
3272 	intel_reference_shared_dpll(state, crtc,
3273 				    port_dpll->pll, &port_dpll->hw_state);
3274 
3275 
3276 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3277 	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3278 							 encoder->port));
3279 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3280 						&port_dpll->hw_state,
3281 						BIT(dpll_id));
3282 	if (!port_dpll->pll) {
3283 		ret = -EINVAL;
3284 		goto err_unreference_tbt_pll;
3285 	}
3286 	intel_reference_shared_dpll(state, crtc,
3287 				    port_dpll->pll, &port_dpll->hw_state);
3288 
3289 	icl_update_active_dpll(state, crtc, encoder);
3290 
3291 	return 0;
3292 
3293 err_unreference_tbt_pll:
3294 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3295 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
3296 
3297 	return ret;
3298 }
3299 
3300 static int icl_compute_dplls(struct intel_atomic_state *state,
3301 			     struct intel_crtc *crtc,
3302 			     struct intel_encoder *encoder)
3303 {
3304 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3305 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3306 
3307 	if (intel_phy_is_combo(dev_priv, phy))
3308 		return icl_compute_combo_phy_dpll(state, crtc);
3309 	else if (intel_phy_is_tc(dev_priv, phy))
3310 		return icl_compute_tc_phy_dplls(state, crtc);
3311 
3312 	MISSING_CASE(phy);
3313 
3314 	return 0;
3315 }
3316 
3317 static int icl_get_dplls(struct intel_atomic_state *state,
3318 			 struct intel_crtc *crtc,
3319 			 struct intel_encoder *encoder)
3320 {
3321 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3322 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3323 
3324 	if (intel_phy_is_combo(dev_priv, phy))
3325 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3326 	else if (intel_phy_is_tc(dev_priv, phy))
3327 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3328 
3329 	MISSING_CASE(phy);
3330 
3331 	return -EINVAL;
3332 }
3333 
/*
 * Drop the references held via the old CRTC state on all per-port DPLLs
 * and clear the corresponding pointers in the new state.
 */
static void icl_put_dplls(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum icl_port_dpll_id id;

	new_crtc_state->shared_dpll = NULL;

	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
		const struct icl_port_dpll *old_port_dpll =
			&old_crtc_state->icl_port_dplls[id];
		struct icl_port_dpll *new_port_dpll =
			&new_crtc_state->icl_port_dplls[id];

		new_port_dpll->pll = NULL;

		/* Only slots that were actually reserved hold a reference. */
		if (!old_port_dpll->pll)
			continue;

		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
	}
}
3359 
/*
 * Read out the HW state of an ICL MG PHY PLL. Reserved bits are masked
 * off so the result can be compared against the computed state.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise
 * (also when display power is off).
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* Registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * With a 38.4 MHz reference most of the BIAS/TDC fields are not
	 * relevant for state comparison, so restrict the masks accordingly
	 * (mirrors icl_calc_mg_pll_state()).
	 */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3425 
/*
 * Read out the HW state of a TGL+ DKL PHY PLL. Each register value is
 * masked down to the fields the driver programs, so the result can be
 * compared against the computed state.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise
 * (also when display power is off).
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
	/* The AFC startup field only matters when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3496 
/*
 * Read out the HW state of a combo/TBT PLL. The CFGCR (and, on TGL+,
 * DIV0) register locations vary per platform.
 *
 * Returns true and fills @hw_state if the PLL is enabled, false otherwise
 * (also when display power is off).
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* DIV0 is only compared when the VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* ICL/JSL/EHL; DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3555 
3556 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3557 				   struct intel_shared_dpll *pll,
3558 				   struct intel_dpll_hw_state *hw_state)
3559 {
3560 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3561 
3562 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3563 }
3564 
3565 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3566 				 struct intel_shared_dpll *pll,
3567 				 struct intel_dpll_hw_state *hw_state)
3568 {
3569 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3570 }
3571 
/*
 * Program the CFGCR (and, where applicable, DIV0) registers of a
 * combo/TBT PLL from the cached SW state. Register locations vary per
 * platform; DIV0 only exists on TGL+.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* ICL/JSL/EHL; DPLL4 uses the CFGCR registers at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* A VBT AFC override without a DIV0 register would be a bug. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3612 
/*
 * Program all MG PHY PLL registers of an ICL TC port from the cached SW
 * state.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	/* Flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3655 
/*
 * Program all DKL PHY PLL registers of a TGL+ TC port from the cached SW
 * state.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only touched when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush the writes before the PLL is enabled. */
	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3720 
3721 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3722 				 struct intel_shared_dpll *pll,
3723 				 i915_reg_t enable_reg)
3724 {
3725 	intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);
3726 
3727 	/*
3728 	 * The spec says we need to "wait" but it also says it should be
3729 	 * immediate.
3730 	 */
3731 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3732 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3733 			pll->info->id);
3734 }
3735 
3736 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3737 			   struct intel_shared_dpll *pll,
3738 			   i915_reg_t enable_reg)
3739 {
3740 	intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
3741 
3742 	/* Timeout is actually 600us. */
3743 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3744 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3745 }
3746 
/*
 * Apply Wa_16011069516 (disable CMTG clock gating) on ADL-P display step
 * A0. Must run right after DPLL0 is enabled; a no-op elsewhere.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only ADL-P A0 display and only DPLL0 are affected. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3770 
/*
 * Full enable sequence for a combo PHY PLL: power up, program the
 * dividers, enable and wait for lock. On JSL/EHL, DPLL4 additionally
 * requires DC states to stay disabled for as long as it is enabled.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* PLL registers must be programmed between power-up and enable. */
	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3804 
3805 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3806 			   struct intel_shared_dpll *pll)
3807 {
3808 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3809 
3810 	icl_dpll_write(dev_priv, pll);
3811 
3812 	/*
3813 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3814 	 * paths should already be setting the appropriate voltage, hence we do
3815 	 * nothing here.
3816 	 */
3817 
3818 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3819 
3820 	/* DVFS post sequence would be here. See the comment above. */
3821 }
3822 
/*
 * Full enable sequence for a TC PHY PLL: power up, program the PHY
 * registers (DKL on TGL+, MG on ICL), enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* PHY registers must be programmed between power-up and enable. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3845 
/*
 * Common disable sequence: clear the enable bit, wait for the lock to
 * drop, then power the PLL down and wait for the power state to clear.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3876 
/*
 * Disable a combo PHY PLL. On JSL/EHL, DPLL4 also releases the DC-off
 * power reference that combo_pll_enable() took.
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	/* Allow DC states again once the PLL is off. */
	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
					pll->wakeref);
}
3889 
3890 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3891 			    struct intel_shared_dpll *pll)
3892 {
3893 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3894 }
3895 
/* Disable an MG/Dekel PHY PLL via the common ICL+ disable sequence. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3903 
/* ICL+ DPLLs use the (non-SSC) cdclk reference clock. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
3909 
3910 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3911 			      const struct intel_dpll_hw_state *hw_state)
3912 {
3913 	drm_dbg_kms(&dev_priv->drm,
3914 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3915 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3916 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3917 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3918 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3919 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3920 		    hw_state->cfgcr0, hw_state->cfgcr1,
3921 		    hw_state->div0,
3922 		    hw_state->mg_refclkin_ctl,
3923 		    hw_state->mg_clktop2_coreclkctl1,
3924 		    hw_state->mg_clktop2_hsclkctl,
3925 		    hw_state->mg_pll_div0,
3926 		    hw_state->mg_pll_div1,
3927 		    hw_state->mg_pll_lf,
3928 		    hw_state->mg_pll_frac_lock,
3929 		    hw_state->mg_pll_ssc,
3930 		    hw_state->mg_pll_bias,
3931 		    hw_state->mg_pll_tdc_coldst_bias);
3932 }
3933 
/* Hooks for the ICL+ combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Hooks for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Hooks for the ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3954 
/* ICL: two combo DPLLs, a TBT PLL and four MG PHY (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3975 
/* JSL/EHL: combo DPLLs only (no Type-C, hence no update_active_dpll). */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3991 
/* Hooks for the Dekel PHY Type-C PLLs (display 12+). */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3998 
/* TGL: two combo DPLLs, a TBT PLL and six Dekel Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4021 
/* RKL: combo DPLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4037 
/* DG1: four combo DPLLs with DG1-specific IDs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4054 
/* ADL-S: four combo DPLLs (mix of ICL and DG1 IDs). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4071 
/* ADL-P: two combo DPLLs, a TBT PLL and four Dekel Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.compute_dplls = icl_compute_dplls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4092 
4093 /**
4094  * intel_shared_dpll_init - Initialize shared DPLLs
4095  * @dev_priv: i915 device
4096  *
4097  * Initialize shared DPLLs for @dev_priv.
4098  */
4099 void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4100 {
4101 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4102 	const struct dpll_info *dpll_info;
4103 	int i;
4104 
4105 	mutex_init(&dev_priv->display.dpll.lock);
4106 
4107 	if (IS_DG2(dev_priv))
4108 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4109 		dpll_mgr = NULL;
4110 	else if (IS_ALDERLAKE_P(dev_priv))
4111 		dpll_mgr = &adlp_pll_mgr;
4112 	else if (IS_ALDERLAKE_S(dev_priv))
4113 		dpll_mgr = &adls_pll_mgr;
4114 	else if (IS_DG1(dev_priv))
4115 		dpll_mgr = &dg1_pll_mgr;
4116 	else if (IS_ROCKETLAKE(dev_priv))
4117 		dpll_mgr = &rkl_pll_mgr;
4118 	else if (DISPLAY_VER(dev_priv) >= 12)
4119 		dpll_mgr = &tgl_pll_mgr;
4120 	else if (IS_JSL_EHL(dev_priv))
4121 		dpll_mgr = &ehl_pll_mgr;
4122 	else if (DISPLAY_VER(dev_priv) >= 11)
4123 		dpll_mgr = &icl_pll_mgr;
4124 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4125 		dpll_mgr = &bxt_pll_mgr;
4126 	else if (DISPLAY_VER(dev_priv) == 9)
4127 		dpll_mgr = &skl_pll_mgr;
4128 	else if (HAS_DDI(dev_priv))
4129 		dpll_mgr = &hsw_pll_mgr;
4130 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4131 		dpll_mgr = &pch_pll_mgr;
4132 
4133 	if (!dpll_mgr) {
4134 		dev_priv->display.dpll.num_shared_dpll = 0;
4135 		return;
4136 	}
4137 
4138 	dpll_info = dpll_mgr->dpll_info;
4139 
4140 	for (i = 0; dpll_info[i].name; i++) {
4141 		if (drm_WARN_ON(&dev_priv->drm,
4142 				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
4143 			break;
4144 
4145 		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4146 		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4147 	}
4148 
4149 	dev_priv->display.dpll.mgr = dpll_mgr;
4150 	dev_priv->display.dpll.num_shared_dpll = i;
4151 }
4152 
4153 /**
4154  * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
4155  * @state: atomic state
4156  * @crtc: CRTC to compute DPLLs for
4157  * @encoder: encoder
4158  *
4159  * This function computes the DPLL state for the given CRTC and encoder.
4160  *
4161  * The new configuration in the atomic commit @state is made effective by
4162  * calling intel_shared_dpll_swap_state().
4163  *
4164  * Returns:
4165  * 0 on success, negative error code on falure.
4166  */
4167 int intel_compute_shared_dplls(struct intel_atomic_state *state,
4168 			       struct intel_crtc *crtc,
4169 			       struct intel_encoder *encoder)
4170 {
4171 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4172 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4173 
4174 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4175 		return -EINVAL;
4176 
4177 	return dpll_mgr->compute_dplls(state, crtc, encoder);
4178 }
4179 
4180 /**
4181  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4182  * @state: atomic state
4183  * @crtc: CRTC to reserve DPLLs for
4184  * @encoder: encoder
4185  *
4186  * This function reserves all required DPLLs for the given CRTC and encoder
4187  * combination in the current atomic commit @state and the new @crtc atomic
4188  * state.
4189  *
4190  * The new configuration in the atomic commit @state is made effective by
4191  * calling intel_shared_dpll_swap_state().
4192  *
4193  * The reserved DPLLs should be released by calling
4194  * intel_release_shared_dplls().
4195  *
4196  * Returns:
4197  * 0 if all required DPLLs were successfully reserved,
4198  * negative error code otherwise.
4199  */
4200 int intel_reserve_shared_dplls(struct intel_atomic_state *state,
4201 			       struct intel_crtc *crtc,
4202 			       struct intel_encoder *encoder)
4203 {
4204 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4205 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4206 
4207 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4208 		return -EINVAL;
4209 
4210 	return dpll_mgr->get_dplls(state, crtc, encoder);
4211 }
4212 
4213 /**
4214  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4215  * @state: atomic state
4216  * @crtc: crtc from which the DPLLs are to be released
4217  *
4218  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4219  * from the current atomic commit @state and the old @crtc atomic state.
4220  *
4221  * The new configuration in the atomic commit @state is made effective by
4222  * calling intel_shared_dpll_swap_state().
4223  */
4224 void intel_release_shared_dplls(struct intel_atomic_state *state,
4225 				struct intel_crtc *crtc)
4226 {
4227 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4228 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4229 
4230 	/*
4231 	 * FIXME: this function is called for every platform having a
4232 	 * compute_clock hook, even though the platform doesn't yet support
4233 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4234 	 * called on those.
4235 	 */
4236 	if (!dpll_mgr)
4237 		return;
4238 
4239 	dpll_mgr->put_dplls(state, crtc);
4240 }
4241 
4242 /**
4243  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4244  * @state: atomic state
4245  * @crtc: the CRTC for which to update the active DPLL
4246  * @encoder: encoder determining the type of port DPLL
4247  *
4248  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4249  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4250  * DPLL selected will be based on the current mode of the encoder's port.
4251  */
4252 void intel_update_active_dpll(struct intel_atomic_state *state,
4253 			      struct intel_crtc *crtc,
4254 			      struct intel_encoder *encoder)
4255 {
4256 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4257 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4258 
4259 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4260 		return;
4261 
4262 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4263 }
4264 
4265 /**
4266  * intel_dpll_get_freq - calculate the DPLL's output frequency
4267  * @i915: i915 device
4268  * @pll: DPLL for which to calculate the output frequency
4269  * @pll_state: DPLL state from which to calculate the output frequency
4270  *
4271  * Return the output frequency corresponding to @pll's passed in @pll_state.
4272  */
4273 int intel_dpll_get_freq(struct drm_i915_private *i915,
4274 			const struct intel_shared_dpll *pll,
4275 			const struct intel_dpll_hw_state *pll_state)
4276 {
4277 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4278 		return 0;
4279 
4280 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4281 }
4282 
4283 /**
4284  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4285  * @i915: i915 device
4286  * @pll: DPLL for which to calculate the output frequency
4287  * @hw_state: DPLL's hardware state
4288  *
4289  * Read out @pll's hardware state into @hw_state.
4290  */
4291 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4292 			     struct intel_shared_dpll *pll,
4293 			     struct intel_dpll_hw_state *hw_state)
4294 {
4295 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4296 }
4297 
/* Read out one PLL's HW state and rebuild its SW tracking state. */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * On JSL/EHL an enabled DPLL4 requires holding a DC_OFF power
	 * domain reference; the matching put is in combo_pll_disable().
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DC_OFF);
	}

	/* Reconstruct the pipe mask from the active CRTCs using this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4325 
4326 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4327 {
4328 	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
4329 		i915->display.dpll.mgr->update_ref_clks(i915);
4330 }
4331 
4332 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4333 {
4334 	int i;
4335 
4336 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4337 		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4338 }
4339 
/*
 * Sanitize one PLL after state readout: apply the ADL-P CMTG clock
 * gating workaround to any enabled PLL, and disable a PLL found enabled
 * without any active user.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	/* Applied to every enabled PLL, whether in use or not. */
	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
4358 
4359 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4360 {
4361 	int i;
4362 
4363 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4364 		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4365 }
4366 
4367 /**
4368  * intel_dpll_dump_hw_state - write hw_state to dmesg
4369  * @dev_priv: i915 drm device
4370  * @hw_state: hw state to be written to the log
4371  *
4372  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4373  */
4374 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4375 			      const struct intel_dpll_hw_state *hw_state)
4376 {
4377 	if (dev_priv->display.dpll.mgr) {
4378 		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4379 	} else {
4380 		/* fallback for platforms that don't use the shared dpll
4381 		 * infrastructure
4382 		 */
4383 		drm_dbg_kms(&dev_priv->drm,
4384 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4385 			    "fp0: 0x%x, fp1: 0x%x\n",
4386 			    hw_state->dpll,
4387 			    hw_state->dpll_md,
4388 			    hw_state->fp0,
4389 			    hw_state->fp1);
4390 	}
4391 }
4392 
/*
 * Cross-check one shared DPLL's SW tracking state against HW readout.
 *
 * When @crtc/@new_crtc_state are given, also verify the pipe's
 * participation in the PLL's active/reference masks and that the tracked
 * hw_state matches the readout. With @crtc == NULL only the PLL-global
 * invariants are checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active consistency checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active pipe must be in the active mask, an inactive one not. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
4447 
4448 void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
4449 				    struct intel_crtc_state *old_crtc_state,
4450 				    struct intel_crtc_state *new_crtc_state)
4451 {
4452 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4453 
4454 	if (new_crtc_state->shared_dpll)
4455 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
4456 					 crtc, new_crtc_state);
4457 
4458 	if (old_crtc_state->shared_dpll &&
4459 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
4460 		u8 pipe_mask = BIT(crtc->pipe);
4461 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
4462 
4463 		I915_STATE_WARN(pll->active_mask & pipe_mask,
4464 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
4465 				pipe_name(crtc->pipe), pll->active_mask);
4466 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
4467 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
4468 				pipe_name(crtc->pipe), pll->state.pipe_mask);
4469 	}
4470 }
4471 
4472 void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
4473 {
4474 	int i;
4475 
4476 	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
4477 		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4478 					 NULL, NULL);
4479 }
4480