1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_de.h"
25 #include "intel_display_types.h"
26 #include "intel_dpio_phy.h"
27 #include "intel_dpll.h"
28 #include "intel_dpll_mgr.h"
29 #include "intel_pch_refclk.h"
30 #include "intel_tc.h"
31 
32 /**
33  * DOC: Display PLLs
34  *
35  * Display PLLs used for driving outputs vary by platform. While some have
36  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
37  * from a pool. In the latter scenario, it is possible that multiple pipes
38  * share a PLL if their configurations match.
39  *
40  * This file provides an abstraction over display PLLs. The function
41  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
42  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
44  * for a given CRTC and encoder configuration by calling
45  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
46  * with intel_release_shared_dplls().
47  * Changes to the users are first staged in the atomic state, and then made
48  * effective by calling intel_shared_dpll_swap_state() during the atomic
49  * commit phase.
50  */
51 
/*
 * Per-platform DPLL management vtable: each platform supplies a table of
 * its available DPLLs plus hooks for reserving/releasing them during an
 * atomic commit and for debug/ref-clock bookkeeping.
 */
struct intel_dpll_mgr {
	/* table describing the DPLLs available on this platform */
	const struct dpll_info *dpll_info;

	/* reserve the DPLL(s) needed by @crtc/@encoder in the atomic state */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the DPLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* switch the DPLL tracked as active for @crtc/@encoder */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh the cached reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
67 
68 static void
69 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
70 				  struct intel_shared_dpll_state *shared_dpll)
71 {
72 	enum intel_dpll_id i;
73 
74 	/* Copy shared dpll state */
75 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
76 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
77 
78 		shared_dpll[i] = pll->state;
79 	}
80 }
81 
82 static struct intel_shared_dpll_state *
83 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
84 {
85 	struct intel_atomic_state *state = to_intel_atomic_state(s);
86 
87 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
88 
89 	if (!state->dpll_set) {
90 		state->dpll_set = true;
91 
92 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
93 						  state->shared_dpll);
94 	}
95 
96 	return state->shared_dpll;
97 }
98 
99 /**
100  * intel_get_shared_dpll_by_id - get a DPLL given its id
101  * @dev_priv: i915 device instance
102  * @id: pll id
103  *
104  * Returns:
105  * A pointer to the DPLL with @id
106  */
107 struct intel_shared_dpll *
108 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
109 			    enum intel_dpll_id id)
110 {
111 	return &dev_priv->dpll.shared_dplls[id];
112 }
113 
114 /**
115  * intel_get_shared_dpll_id - get the id of a DPLL
116  * @dev_priv: i915 device instance
117  * @pll: the DPLL
118  *
119  * Returns:
120  * The id of @pll
121  */
122 enum intel_dpll_id
123 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
124 			 struct intel_shared_dpll *pll)
125 {
126 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
127 
128 	if (drm_WARN_ON(&dev_priv->drm,
129 			pll_idx < 0 ||
130 			pll_idx >= dev_priv->dpll.num_shared_dpll))
131 		return -1;
132 
133 	return pll_idx;
134 }
135 
/* For ILK+ */
/*
 * Warn if the current enable state of @pll does not match the expected
 * @state (or if no PLL was supplied at all).
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
153 
154 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
155 {
156 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
157 }
158 
159 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
160 {
161 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
162 }
163 
/*
 * Return the enable register for a combo PHY PLL.  DG1 has its own
 * register layout, and on JSL/EHL DPLL4 is controlled via MG_PLL_ENABLE(0);
 * everything else uses the ICL layout.
 */
static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
	if (IS_DG1(i915))
		return DG1_DPLL_ENABLE(pll->info->id);
	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
		return MG_PLL_ENABLE(0);

	return ICL_DPLL_ENABLE(pll->info->id);
}
175 
/*
 * Return the enable register for a Type-C port PLL, addressed by the TC
 * port derived from the PLL id.  ADL-P uses its own register layout.
 */
static i915_reg_t
intel_tc_pll_enable_reg(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);

	if (IS_ALDERLAKE_P(i915))
		return ADLP_PORTTC_PLL_ENABLE(tc_port);

	return MG_PLL_ENABLE(tc_port);
}
188 
189 /**
190  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
191  * @crtc_state: CRTC, and its state, which has a shared DPLL
192  *
193  * Enable the shared DPLL used by @crtc.
194  */
195 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
196 {
197 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
198 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
199 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
200 	unsigned int pipe_mask = BIT(crtc->pipe);
201 	unsigned int old_mask;
202 
203 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
204 		return;
205 
206 	mutex_lock(&dev_priv->dpll.lock);
207 	old_mask = pll->active_mask;
208 
209 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
210 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
211 		goto out;
212 
213 	pll->active_mask |= pipe_mask;
214 
215 	drm_dbg_kms(&dev_priv->drm,
216 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
217 		    pll->info->name, pll->active_mask, pll->on,
218 		    crtc->base.base.id, crtc->base.name);
219 
220 	if (old_mask) {
221 		drm_WARN_ON(&dev_priv->drm, !pll->on);
222 		assert_shared_dpll_enabled(dev_priv, pll);
223 		goto out;
224 	}
225 	drm_WARN_ON(&dev_priv->drm, pll->on);
226 
227 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
228 	pll->info->funcs->enable(dev_priv, pll);
229 	pll->on = true;
230 
231 out:
232 	mutex_unlock(&dev_priv->dpll.lock);
233 }
234 
235 /**
236  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
237  * @crtc_state: CRTC, and its state, which has a shared DPLL
238  *
239  * Disable the shared DPLL used by @crtc.
240  */
241 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
242 {
243 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
244 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
245 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
246 	unsigned int pipe_mask = BIT(crtc->pipe);
247 
248 	/* PCH only available on ILK+ */
249 	if (DISPLAY_VER(dev_priv) < 5)
250 		return;
251 
252 	if (pll == NULL)
253 		return;
254 
255 	mutex_lock(&dev_priv->dpll.lock);
256 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
257 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
258 		     crtc->base.base.id, crtc->base.name))
259 		goto out;
260 
261 	drm_dbg_kms(&dev_priv->drm,
262 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
263 		    pll->info->name, pll->active_mask, pll->on,
264 		    crtc->base.base.id, crtc->base.name);
265 
266 	assert_shared_dpll_enabled(dev_priv, pll);
267 	drm_WARN_ON(&dev_priv->drm, !pll->on);
268 
269 	pll->active_mask &= ~pipe_mask;
270 	if (pll->active_mask)
271 		goto out;
272 
273 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
274 	pll->info->funcs->disable(dev_priv, pll);
275 	pll->on = false;
276 
277 out:
278 	mutex_unlock(&dev_priv->dpll.lock);
279 }
280 
/*
 * Find a shared DPLL for @crtc among the PLLs in @dpll_mask: prefer a
 * PLL already in use whose staged hw state matches @pll_state exactly
 * (so the pipes can share it), otherwise fall back to the first PLL in
 * the mask with no users.  Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* remember the first free PLL as a fallback */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
329 
/*
 * Stage a reference from @crtc to @pll in the atomic state.  The first
 * user also records @pll_state as the PLL's staged hw state.
 */
static void
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll;
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	/* only the first reference records the hw state */
	if (shared_dpll[id].pipe_mask == 0)
		shared_dpll[id].hw_state = *pll_state;

	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
		pipe_name(crtc->pipe));

	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
}
350 
351 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
352 					  const struct intel_crtc *crtc,
353 					  const struct intel_shared_dpll *pll)
354 {
355 	struct intel_shared_dpll_state *shared_dpll;
356 
357 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
358 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
359 }
360 
361 static void intel_put_dpll(struct intel_atomic_state *state,
362 			   struct intel_crtc *crtc)
363 {
364 	const struct intel_crtc_state *old_crtc_state =
365 		intel_atomic_get_old_crtc_state(state, crtc);
366 	struct intel_crtc_state *new_crtc_state =
367 		intel_atomic_get_new_crtc_state(state, crtc);
368 
369 	new_crtc_state->shared_dpll = NULL;
370 
371 	if (!old_crtc_state->shared_dpll)
372 		return;
373 
374 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
375 }
376 
377 /**
378  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
379  * @state: atomic state
380  *
381  * This is the dpll version of drm_atomic_helper_swap_state() since the
382  * helper does not handle driver-specific global state.
383  *
384  * For consistency with atomic helpers this function does a complete swap,
385  * i.e. it also puts the current state into @state, even though there is no
386  * need for that at this moment.
387  */
388 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
389 {
390 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
391 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
392 	enum intel_dpll_id i;
393 
394 	if (!state->dpll_set)
395 		return;
396 
397 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
398 		struct intel_shared_dpll *pll =
399 			&dev_priv->dpll.shared_dplls[i];
400 
401 		swap(pll->state, shared_dpll[i]);
402 	}
403 }
404 
/*
 * Read the PCH DPLL and FP divider registers into @hw_state.  Returns
 * true if the PLL's VCO is enabled, false if display core power is not
 * available (no registers read in that case).
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* bail out (reporting "off") if display core power isn't enabled */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
427 
/*
 * Warn unless at least one PCH reference clock source (SSC, non-spread
 * or superspread) is enabled.  Only meaningful on IBX/CPT PCHs.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
440 
/* Program and enable a PCH DPLL from the cached hw state. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* FP dividers go in before the DPLL control register */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
467 
/* Disable a PCH DPLL by writing zero to its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* settle delay, mirroring the enable path */
	udelay(200);
}
477 
/*
 * Reserve a PCH DPLL for @crtc: IBX has a fixed pipe->PLL mapping,
 * other PCH platforms pick a matching or free PLL from PCH PLL A/B.
 */
static bool ibx_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return false;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
515 
/* Log the PCH DPLL related fields of @hw_state for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
527 
/* hardware hooks shared by both PCH DPLLs */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* the PCH DPLL pool; the empty entry terminates the table */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH platforms */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
546 
/* Enable a HSW WRPLL by writing its cached control value. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* brief settle delay after enabling */
	udelay(20);
}
556 
/* Enable the HSW SPLL by writing its cached control value. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* brief settle delay after enabling */
	udelay(20);
}
564 
/*
 * Disable a WRPLL; if this PLL was registered as a PCH SSC user, retry
 * the PCH reference clock setup now that it no longer depends on it.
 */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
582 
/*
 * Disable the SPLL; if it was registered as a PCH SSC user, retry the
 * PCH reference clock setup now that it no longer depends on it.
 */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
600 
/*
 * Read the WRPLL control register into @hw_state.  Returns true if the
 * PLL is enabled, false if display core power is not available.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* bail out (reporting "off") if display core power isn't enabled */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
621 
/*
 * Read the SPLL control register into @hw_state.  Returns true if the
 * PLL is enabled, false if display core power is not available.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* bail out (reporting "off") if display core power isn't enabled */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
641 
/* LCPLL frequency in MHz, and the same value scaled by 2000 as a u64 */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* post divider (p) search range: even values from 2 to 64 */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* one candidate set of WRPLL dividers (r2 = 2*R, n2 = 2*N, p = post divider) */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
658 
/*
 * Return the PPM budget allowed for the WR PLL at pixel clock @clock
 * (in Hz).  A set of well-known video clocks gets special budgets;
 * every other frequency uses the default of 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const int exact_clocks[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int clocks_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int clocks_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int clocks_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int clocks_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		unsigned int num;
		unsigned int budget;
	} groups[] = {
		{ exact_clocks, sizeof(exact_clocks) / sizeof(exact_clocks[0]), 0 },
		{ clocks_1500, sizeof(clocks_1500) / sizeof(clocks_1500[0]), 1500 },
		{ clocks_2000, sizeof(clocks_2000) / sizeof(clocks_2000[0]), 2000 },
		{ clocks_4000, sizeof(clocks_4000) / sizeof(clocks_4000[0]), 4000 },
		{ clocks_5000, sizeof(clocks_5000) / sizeof(clocks_5000[0]), 5000 },
	};
	unsigned int i, j;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		for (j = 0; j < groups[i].num; j++) {
			if (groups[i].clocks[j] == clock)
				return groups[i].budget;
		}
	}

	/* default budget for all other clocks */
	return 1000;
}
732 
/*
 * Evaluate the divider candidate (r2, n2, p) for the target @freq2k
 * (target clock / 100) against the current @best and replace @best if
 * the candidate is preferable under the PPM error @budget.  The exact
 * selection criteria are described in the comment below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
792 
/*
 * Compute WRPLL dividers for @clock by brute-force search over the r2/n2
 * ranges that keep the reference and VCO frequencies within bounds,
 * scoring every candidate with hsw_wrpll_update_rnp().
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
857 
858 static struct intel_shared_dpll *
859 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
860 		       struct intel_crtc *crtc)
861 {
862 	struct intel_crtc_state *crtc_state =
863 		intel_atomic_get_new_crtc_state(state, crtc);
864 	struct intel_shared_dpll *pll;
865 	u32 val;
866 	unsigned int p, n2, r2;
867 
868 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
869 
870 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
871 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
872 	      WRPLL_DIVIDER_POST(p);
873 
874 	crtc_state->dpll_hw_state.wrpll = val;
875 
876 	pll = intel_find_shared_dpll(state, crtc,
877 				     &crtc_state->dpll_hw_state,
878 				     BIT(DPLL_ID_WRPLL2) |
879 				     BIT(DPLL_ID_WRPLL1));
880 
881 	if (!pll)
882 		return NULL;
883 
884 	return pll;
885 }
886 
/*
 * Decode a WRPLL control value into the resulting clock (in kHz), picking
 * the reference clock according to the WRPLL_REF_* selection.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
926 
927 static struct intel_shared_dpll *
928 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
929 {
930 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
931 	struct intel_shared_dpll *pll;
932 	enum intel_dpll_id pll_id;
933 	int clock = crtc_state->port_clock;
934 
935 	switch (clock / 2) {
936 	case 81000:
937 		pll_id = DPLL_ID_LCPLL_810;
938 		break;
939 	case 135000:
940 		pll_id = DPLL_ID_LCPLL_1350;
941 		break;
942 	case 270000:
943 		pll_id = DPLL_ID_LCPLL_2700;
944 		break;
945 	default:
946 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
947 			    clock);
948 		return NULL;
949 	}
950 
951 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
952 
953 	if (!pll)
954 		return NULL;
955 
956 	return pll;
957 }
958 
959 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
960 				  const struct intel_shared_dpll *pll,
961 				  const struct intel_dpll_hw_state *pll_state)
962 {
963 	int link_clock = 0;
964 
965 	switch (pll->info->id) {
966 	case DPLL_ID_LCPLL_810:
967 		link_clock = 81000;
968 		break;
969 	case DPLL_ID_LCPLL_1350:
970 		link_clock = 135000;
971 		break;
972 	case DPLL_ID_LCPLL_2700:
973 		link_clock = 270000;
974 		break;
975 	default:
976 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
977 		break;
978 	}
979 
980 	return link_clock * 2;
981 }
982 
983 static struct intel_shared_dpll *
984 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
985 		      struct intel_crtc *crtc)
986 {
987 	struct intel_crtc_state *crtc_state =
988 		intel_atomic_get_new_crtc_state(state, crtc);
989 
990 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
991 		return NULL;
992 
993 	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
994 					 SPLL_REF_MUXED_SSC;
995 
996 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
997 				      BIT(DPLL_ID_SPLL));
998 }
999 
1000 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1001 				 const struct intel_shared_dpll *pll,
1002 				 const struct intel_dpll_hw_state *pll_state)
1003 {
1004 	int link_clock = 0;
1005 
1006 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1007 	case SPLL_FREQ_810MHz:
1008 		link_clock = 81000;
1009 		break;
1010 	case SPLL_FREQ_1350MHz:
1011 		link_clock = 135000;
1012 		break;
1013 	case SPLL_FREQ_2700MHz:
1014 		link_clock = 270000;
1015 		break;
1016 	default:
1017 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1018 		break;
1019 	}
1020 
1021 	return link_clock * 2;
1022 }
1023 
/*
 * Reserve the DPLL for @crtc on HSW/BDW: a WRPLL for HDMI, a fixed
 * LCPLL-sourced DPLL for DP link rates, the SPLL for analog output.
 */
static bool hsw_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* start from a clean hw state before staging new PLL settings */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);
	else
		return false;

	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1054 
/*
 * Record the HSW PLL reference clocks (kHz): SSC is always 135 MHz,
 * while the non-SSC reference depends on a fuse strap (24 or 135 MHz).
 */
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 135000;
	/* Non-SSC is only used on non-ULT HSW. */
	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
		i915->dpll.ref_clks.nssc = 24000;
	else
		i915->dpll.ref_clks.nssc = 135000;
}
1064 
/* Log the HSW PLL hw state (WRPLL and SPLL control values) to debugfs/kms log. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1071 
/* Hooks for the programmable HSW WRPLLs (used for HDMI, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1078 
/* Hooks for the HSW SPLL (used for the analog output, see hsw_get_dpll()). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1085 
/*
 * The HSW LCPLLs are registered with INTEL_DPLL_ALWAYS_ON (see hsw_plls),
 * so there is nothing to do to enable them.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1090 
/* LCPLLs are always on (see hsw_plls); disabling is a no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1095 
/*
 * LCPLLs are always enabled and have no per-PLL state to read back,
 * so simply report them as enabled. @hw_state is left untouched.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1102 
/* Hooks for the fixed-frequency, always-on HSW LCPLLs (used for DP). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1109 
/*
 * PLL pool for HSW: two programmable WRPLLs, the SPLL, and three
 * fixed-rate LCPLLs that are permanently enabled.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1119 
/* Platform PLL manager vtable for HSW. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1127 
/* Per-PLL register addresses for the SKL DPLLs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1158 
/*
 * Update this PLL's portion of the shared DPLL_CTRL1 register.
 *
 * Each PLL owns a 6-bit field (HDMI mode, SSC, link rate) in
 * DPLL_CTRL1. hw_state.ctrl1 is stored unshifted, i.e. as if for
 * DPLL 0 (see skl_ddi_pll_get_freq()), hence the << (id * 6) here.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* clear this PLL's field before or-ing in the new value */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1175 
/*
 * Program ctrl1/cfgcr1/cfgcr2 for a SKL DPLL (1-3), enable it and
 * wait for the PLL to lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the lock bit */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1196 
/*
 * DPLL0 is always enabled since it also drives CDCLK (see
 * skl_ddi_dpll0_get_hw_state()); only its DPLL_CTRL1 link rate needs
 * to be programmed here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1202 
/* Turn off a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1214 
/* DPLL0 drives CDCLK and must stay enabled; disabling is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1219 
/*
 * Read back the state of a SKL DPLL (1-3): ctrl1 always, and
 * cfgcr1/cfgcr2 only when the PLL is in HDMI mode. Returns false if
 * display power is off or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	/* each PLL owns a 6-bit field in DPLL_CTRL1; store it unshifted */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1256 
/*
 * Read back DPLL0 state. Only ctrl1 is relevant (DPLL0 doesn't
 * support HDMI mode, see skl_dpll_regs); the PLL itself is expected
 * to always be enabled since it drives CDCLK.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	/* store this PLL's 6-bit DPLL_CTRL1 field unshifted */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1289 
/* Best WRPLL divider candidate found so far (see skl_wrpll_try_divider()). */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1296 
1297 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1298 {
1299 	memset(ctx, 0, sizeof(*ctx));
1300 
1301 	ctx->min_deviation = U64_MAX;
1302 }
1303 
1304 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1305 #define SKL_DCO_MAX_PDEVIATION	100
1306 #define SKL_DCO_MAX_NDEVIATION	600
1307 
1308 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1309 				  u64 central_freq,
1310 				  u64 dco_freq,
1311 				  unsigned int divider)
1312 {
1313 	u64 deviation;
1314 
1315 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1316 			      central_freq);
1317 
1318 	/* positive deviation */
1319 	if (dco_freq >= central_freq) {
1320 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1321 		    deviation < ctx->min_deviation) {
1322 			ctx->min_deviation = deviation;
1323 			ctx->central_freq = central_freq;
1324 			ctx->dco_freq = dco_freq;
1325 			ctx->p = divider;
1326 		}
1327 	/* negative deviation */
1328 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1329 		   deviation < ctx->min_deviation) {
1330 		ctx->min_deviation = deviation;
1331 		ctx->central_freq = central_freq;
1332 		ctx->dco_freq = dco_freq;
1333 		ctx->p = divider;
1334 	}
1335 }
1336 
/*
 * Split a supported divider p into the three hardware multipliers so
 * that p0 * p1 * p2 == p. The outputs are left untouched for
 * unsupported values of p (callers pre-initialize them).
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/*
		 * Even dividers: p = 2 * half. The order of the checks
		 * matters; half % 2 must be tested before half % 3/7 to
		 * reproduce the documented factorizations.
		 */
		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1385 
/* Register-level WRPLL configuration (see skl_wrpll_params_populate()). */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit DCO fractional part */
	u32 dco_integer;	/* DCO integer part, in MHz of the DCO freq */
	u32 qdiv_ratio;		/* Q divider ratio (p1) */
	u32 qdiv_mode;		/* 1 if the Q divider is active (ratio != 1) */
	u32 kdiv;		/* encoded K divider (p2) */
	u32 pdiv;		/* encoded P divider (p0) */
	u32 central_freq;	/* encoded DCO central frequency select */
};
1395 
/*
 * Translate the chosen WRPLL configuration into register field values:
 * encode the DCO central frequency and the p0/p1/p2 dividers, and
 * compute the DCO integer/fractional parts from the AFE clock.
 *
 * NOTE(review): there is no default case for @central_freq; it is
 * assumed to be one of the three values skl_ddi_calculate_wrpll()
 * iterates over.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		/* last case, no break needed */
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* the Q divider is bypassed (mode 0) when the ratio is 1 */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1463 
/*
 * Find WRPLL dividers for the given pixel clock.
 *
 * Every supported divider is tried against the three DCO central
 * frequencies, keeping the candidate with the smallest deviation
 * (see skl_wrpll_try_divider()). Even dividers are preferred over odd
 * ones: odd dividers are only considered if no even divider produced
 * a valid solution. Returns false if no divider yields a DCO
 * frequency within the allowed deviation window.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1537 
/*
 * Compute the full WRPLL configuration (ctrl1, cfgcr1, cfgcr2) for an
 * HDMI output and store it in crtc_state->dpll_hw_state. Returns
 * false if no valid dividers exist for the port clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, skl_ddi_calculate_wrpll() expects Hz */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1575 
/*
 * Compute the port clock (kHz) from a WRPLL (HDMI mode) hw state by
 * decoding the P/Q/K dividers and the DCO frequency from cfgcr1/cfgcr2.
 * Returns 0 on an invalid divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* the Q divider only applies when QDIV_MODE is set */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* integer part of the DCO frequency, in kHz */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	/* add the 15-bit fractional part */
	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is 5x the port clock (see skl_ddi_calculate_wrpll()) */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1646 
/*
 * Encode the DP/eDP link rate into the DPLL_CTRL1 value of
 * crtc_state->dpll_hw_state. The value is written as if for DPLL 0
 * and shifted into the right field later by skl_ddi_pll_write_ctrl1().
 *
 * NOTE(review): rates not listed below fall through and leave the
 * link rate field at 0 — assumed unreachable for validated modes.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1686 
1687 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1688 				  const struct intel_shared_dpll *pll,
1689 				  const struct intel_dpll_hw_state *pll_state)
1690 {
1691 	int link_clock = 0;
1692 
1693 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1694 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1695 	case DPLL_CTRL1_LINK_RATE_810:
1696 		link_clock = 81000;
1697 		break;
1698 	case DPLL_CTRL1_LINK_RATE_1080:
1699 		link_clock = 108000;
1700 		break;
1701 	case DPLL_CTRL1_LINK_RATE_1350:
1702 		link_clock = 135000;
1703 		break;
1704 	case DPLL_CTRL1_LINK_RATE_1620:
1705 		link_clock = 162000;
1706 		break;
1707 	case DPLL_CTRL1_LINK_RATE_2160:
1708 		link_clock = 216000;
1709 		break;
1710 	case DPLL_CTRL1_LINK_RATE_2700:
1711 		link_clock = 270000;
1712 		break;
1713 	default:
1714 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1715 		break;
1716 	}
1717 
1718 	return link_clock * 2;
1719 }
1720 
/*
 * Compute the DPLL state for the crtc and reserve a matching shared
 * DPLL. eDP is restricted to DPLL0 (which also drives CDCLK); all
 * other outputs pick from DPLL1-3.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		/* unhandled output type */
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1769 
1770 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1771 				const struct intel_shared_dpll *pll,
1772 				const struct intel_dpll_hw_state *pll_state)
1773 {
1774 	/*
1775 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1776 	 * the internal shift for each field
1777 	 */
1778 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1779 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1780 	else
1781 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1782 }
1783 
/* The SKL DPLLs use the (non-SSC) CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1789 
/* Log the SKL PLL hw state (ctrl1/cfgcr1/cfgcr2) to the kms debug log. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1799 
/* Hooks for the programmable SKL DPLLs 1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1806 
/* Hooks for DPLL0, which drives CDCLK and is therefore always on. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1813 
/* PLL pool for SKL: always-on DPLL0 (eDP/CDCLK) plus DPLL1-3. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1821 
/* Platform PLL manager vtable for SKL. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1829 
/*
 * Program and enable a BXT/GLK port PLL from pll->state.hw_state.
 *
 * The PLL id maps 1:1 to the port; the dividers live in the DPIO PHY
 * register space addressed by (phy, ch). On GLK the PLL power well
 * must be powered up before programming. The register sequence and
 * ordering below follow the hardware programming requirements — do
 * not reorder.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		/* GLK: power up the PLL before touching its registers */
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1950 
/*
 * Disable a BXT/GLK port PLL; on GLK additionally power down the PLL
 * and wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1973 
/*
 * Read back the current BXT/GLK port PLL configuration, masking each
 * register to exactly the bits programmed by bxt_ddi_pll_enable() so
 * the result can be compared against the software state. Returns
 * false if display power is unavailable or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2053 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* port clock this entry applies to, in kHz */
	u32 p1;			/* P1 post divider */
	u32 p2;			/* P2 post divider */
	u32 m2_int;		/* integer part of the M2 feedback divider */
	u32 m2_frac;		/* fractional part of M2 (lower 22 bits) */
	bool m2_frac_en;	/* whether the fractional part is used */
	u32 n;			/* N pre-divider */

	int vco;		/* resulting VCO frequency, in kHz */
};
2066 
/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	/* clock  p1 p2 m2_int  m2_frac m2_frac_en n */
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2077 
2078 static bool
2079 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2080 			  struct bxt_clk_div *clk_div)
2081 {
2082 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2083 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2084 	struct dpll best_clock;
2085 
2086 	/* Calculate HDMI div */
2087 	/*
2088 	 * FIXME: tie the following calculation into
2089 	 * i9xx_crtc_compute_clock
2090 	 */
2091 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2092 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2093 			crtc_state->port_clock,
2094 			pipe_name(crtc->pipe));
2095 		return false;
2096 	}
2097 
2098 	clk_div->p1 = best_clock.p1;
2099 	clk_div->p2 = best_clock.p2;
2100 	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2101 	clk_div->n = best_clock.n;
2102 	clk_div->m2_int = best_clock.m2 >> 22;
2103 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2104 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2105 
2106 	clk_div->vco = best_clock.vco;
2107 
2108 	return true;
2109 }
2110 
2111 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2112 				    struct bxt_clk_div *clk_div)
2113 {
2114 	int clock = crtc_state->port_clock;
2115 	int i;
2116 
2117 	*clk_div = bxt_dp_clk_val[0];
2118 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2119 		if (bxt_dp_clk_val[i].clock == clock) {
2120 			*clk_div = bxt_dp_clk_val[i];
2121 			break;
2122 		}
2123 	}
2124 
2125 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2126 }
2127 
/*
 * Translate pre-computed dividers (clk_div) into the BXT port PLL register
 * values stored in crtc_state->dpll_hw_state.  Returns false when the VCO
 * frequency falls outside the bands with known loop filter coefficients.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Loop filter coefficients are selected by VCO frequency band. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger value scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* Pack dividers and loop filter settings into the register images. */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2197 
2198 static bool
2199 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2200 {
2201 	struct bxt_clk_div clk_div = {};
2202 
2203 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2204 
2205 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2206 }
2207 
2208 static bool
2209 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2210 {
2211 	struct bxt_clk_div clk_div = {};
2212 
2213 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2214 
2215 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2216 }
2217 
/*
 * Reconstruct the port clock from the saved BXT port PLL register state by
 * unpacking the divider fields and feeding them to the common CHV DPLL
 * formula.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	clock.m1 = 2;
	/* Recombine M2: integer part from pll0, fraction (lower 22 bits) from pll2. */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2234 
/*
 * Compute the DPLL state for the CRTC and reserve the PLL hard-wired to the
 * encoder's port.  Returns false if no valid PLL configuration exists for
 * the requested mode.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* Fill crtc_state->dpll_hw_state for the active output type. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	/* Track this CRTC as a user of the PLL in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2267 
2268 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2269 {
2270 	i915->dpll.ref_clks.ssc = 100000;
2271 	i915->dpll.ref_clks.nssc = 100000;
2272 	/* DSI non-SSC ref 19.2MHz */
2273 }
2274 
2275 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2276 			      const struct intel_dpll_hw_state *hw_state)
2277 {
2278 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2279 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2280 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2281 		    hw_state->ebb0,
2282 		    hw_state->ebb4,
2283 		    hw_state->pll0,
2284 		    hw_state->pll1,
2285 		    hw_state->pll2,
2286 		    hw_state->pll3,
2287 		    hw_state->pll6,
2288 		    hw_state->pll8,
2289 		    hw_state->pll9,
2290 		    hw_state->pll10,
2291 		    hw_state->pcsdw12);
2292 }
2293 
/* Operations vtable for the BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2300 
/* BXT port PLL pool: one PLL per port, reusing the SKL DPLL id numbering. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2307 
/* Shared-DPLL manager entry points for the BXT port PLLs. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2315 
/*
 * Decompose an overall divider value into the P, Q and K multipliers,
 * where bestdiv == pdiv * qdiv * kdiv.  An even divider that matches no
 * case (not possible with the divider table in icl_calc_wrpll()) leaves
 * the outputs untouched, like the original implementation.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 == 0) {
		/* even dividers */
		if (bestdiv == 2) {
			*pdiv = 2;
			*qdiv = 1;
			*kdiv = 1;
			return;
		}
		/* Order matters: try P = 2, 3, 5, 7 in turn, with K = 2. */
		if (bestdiv % 4 == 0) {
			*pdiv = 2;
			*qdiv = bestdiv / 4;
		} else if (bestdiv % 6 == 0) {
			*pdiv = 3;
			*qdiv = bestdiv / 6;
		} else if (bestdiv % 5 == 0) {
			*pdiv = 5;
			*qdiv = bestdiv / 10;
		} else if (bestdiv % 14 == 0) {
			*pdiv = 7;
			*qdiv = bestdiv / 14;
		} else {
			return;
		}
		*kdiv = 2;
	} else if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
		/* odd primes map directly to P */
		*pdiv = bestdiv;
		*qdiv = 1;
		*kdiv = 1;
	} else {
		/* remaining odd dividers: 9, 15, 21 */
		*pdiv = bestdiv / 3;
		*qdiv = 1;
		*kdiv = 3;
	}
}
2354 
2355 static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
2356 				      u32 dco_freq, u32 ref_freq,
2357 				      int pdiv, int qdiv, int kdiv)
2358 {
2359 	u32 dco;
2360 
2361 	switch (kdiv) {
2362 	case 1:
2363 		params->kdiv = 1;
2364 		break;
2365 	case 2:
2366 		params->kdiv = 2;
2367 		break;
2368 	case 3:
2369 		params->kdiv = 4;
2370 		break;
2371 	default:
2372 		WARN(1, "Incorrect KDiv\n");
2373 	}
2374 
2375 	switch (pdiv) {
2376 	case 2:
2377 		params->pdiv = 1;
2378 		break;
2379 	case 3:
2380 		params->pdiv = 2;
2381 		break;
2382 	case 5:
2383 		params->pdiv = 4;
2384 		break;
2385 	case 7:
2386 		params->pdiv = 8;
2387 		break;
2388 	default:
2389 		WARN(1, "Incorrect PDiv\n");
2390 	}
2391 
2392 	WARN_ON(kdiv != 2 && qdiv != 1);
2393 
2394 	params->qdiv_ratio = qdiv;
2395 	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2396 
2397 	dco = div_u64((u64)dco_freq << 15, ref_freq);
2398 
2399 	params->dco_integer = dco >> 15;
2400 	params->dco_fraction = dco & 0x7fff;
2401 }
2402 
2403 /*
2404  * Display WA #22010492432: ehl, tgl, adl-p
2405  * Program half of the nominal DCO divider fraction value.
2406  */
2407 static bool
2408 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2409 {
2410 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2411 		 IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2412 		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
2413 		 i915->dpll.ref_clks.nssc == 38400;
2414 }
2415 
/* Table entry mapping a port clock to pre-computed WRPLL parameters. */
struct icl_combo_pll_params {
	int clock;	/* port clock in kHz */
	struct skl_wrpll_params wrpll;
};
2420 
2421 /*
2422  * These values alrea already adjusted: they're the bits we write to the
2423  * registers, not the logical values.
2424  */
/* Combo PHY DP PLL parameters for a 24 MHz reference; lookup is by .clock. */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2451 
2452 
/*
 * Combo PHY DP PLL parameters for a 19.2 MHz reference.  Also used for
 * 38.4 MHz references, which the DPLL divides by 2 (see icl_wrpll_ref_clock()).
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2480 
/* TBT PLL parameters for ICL (display ver < 12), 24 MHz reference. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters for ICL, 19.2 MHz (also used for 38.4 MHz) reference. */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters for TGL+ (display ver >= 12), 19.2/38.4 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters for TGL+, 24 MHz reference. */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2501 
2502 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2503 				  struct skl_wrpll_params *pll_params)
2504 {
2505 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2506 	const struct icl_combo_pll_params *params =
2507 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2508 		icl_dp_combo_pll_24MHz_values :
2509 		icl_dp_combo_pll_19_2MHz_values;
2510 	int clock = crtc_state->port_clock;
2511 	int i;
2512 
2513 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2514 		if (clock == params[i].clock) {
2515 			*pll_params = params[i].wrpll;
2516 			return true;
2517 		}
2518 	}
2519 
2520 	MISSING_CASE(clock);
2521 	return false;
2522 }
2523 
2524 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2525 			     struct skl_wrpll_params *pll_params)
2526 {
2527 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2528 
2529 	if (DISPLAY_VER(dev_priv) >= 12) {
2530 		switch (dev_priv->dpll.ref_clks.nssc) {
2531 		default:
2532 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2533 			fallthrough;
2534 		case 19200:
2535 		case 38400:
2536 			*pll_params = tgl_tbt_pll_19_2MHz_values;
2537 			break;
2538 		case 24000:
2539 			*pll_params = tgl_tbt_pll_24MHz_values;
2540 			break;
2541 		}
2542 	} else {
2543 		switch (dev_priv->dpll.ref_clks.nssc) {
2544 		default:
2545 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
2546 			fallthrough;
2547 		case 19200:
2548 		case 38400:
2549 			*pll_params = icl_tbt_pll_19_2MHz_values;
2550 			break;
2551 		case 24000:
2552 			*pll_params = icl_tbt_pll_24MHz_values;
2553 			break;
2554 		}
2555 	}
2556 
2557 	return true;
2558 }
2559 
/* Frequency readout is intentionally unsupported for the TBT PLL. */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
2572 
2573 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
2574 {
2575 	int ref_clock = i915->dpll.ref_clks.nssc;
2576 
2577 	/*
2578 	 * For ICL+, the spec states: if reference frequency is 38.4,
2579 	 * use 19.2 because the DPLL automatically divides that by 2.
2580 	 */
2581 	if (ref_clock == 38400)
2582 		ref_clock = 19200;
2583 
2584 	return ref_clock;
2585 }
2586 
2587 static bool
2588 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
2589 	       struct skl_wrpll_params *wrpll_params)
2590 {
2591 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2592 	int ref_clock = icl_wrpll_ref_clock(i915);
2593 	u32 afe_clock = crtc_state->port_clock * 5;
2594 	u32 dco_min = 7998000;
2595 	u32 dco_max = 10000000;
2596 	u32 dco_mid = (dco_min + dco_max) / 2;
2597 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2598 					 18, 20, 24, 28, 30, 32,  36,  40,
2599 					 42, 44, 48, 50, 52, 54,  56,  60,
2600 					 64, 66, 68, 70, 72, 76,  78,  80,
2601 					 84, 88, 90, 92, 96, 98, 100, 102,
2602 					  3,  5,  7,  9, 15, 21 };
2603 	u32 dco, best_dco = 0, dco_centrality = 0;
2604 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2605 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2606 
2607 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2608 		dco = afe_clock * dividers[d];
2609 
2610 		if (dco <= dco_max && dco >= dco_min) {
2611 			dco_centrality = abs(dco - dco_mid);
2612 
2613 			if (dco_centrality < best_dco_centrality) {
2614 				best_dco_centrality = dco_centrality;
2615 				best_div = dividers[d];
2616 				best_dco = dco;
2617 			}
2618 		}
2619 	}
2620 
2621 	if (best_div == 0)
2622 		return false;
2623 
2624 	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2625 	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2626 				  pdiv, qdiv, kdiv);
2627 
2628 	return true;
2629 }
2630 
/*
 * Reconstruct the port clock from the saved combo PHY PLL register state
 * by decoding the P/Q/K dividers and DCO frequency from CFGCR0/CFGCR1.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only applies when qdiv_mode is set. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Translate the register encodings back to divider ratios. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the WA #22010492432 halving applied in icl_calc_dpll_state(). */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* Unrecognized register encodings decode to 0 above; don't divide by 0. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2691 
/*
 * Encode the computed WRPLL parameters into the CFGCR0/CFGCR1 register
 * values of @pll_state; all other state is cleared.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	/* WA #22010492432: program half of the nominal DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
2716 
2717 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2718 				     u32 *target_dco_khz,
2719 				     struct intel_dpll_hw_state *state,
2720 				     bool is_dkl)
2721 {
2722 	u32 dco_min_freq, dco_max_freq;
2723 	int div1_vals[] = {7, 5, 3, 2};
2724 	unsigned int i;
2725 	int div2;
2726 
2727 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2728 	dco_max_freq = is_dp ? 8100000 : 10000000;
2729 
2730 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2731 		int div1 = div1_vals[i];
2732 
2733 		for (div2 = 10; div2 > 0; div2--) {
2734 			int dco = div1 * div2 * clock_khz * 5;
2735 			int a_divratio, tlinedrv, inputsel;
2736 			u32 hsdiv;
2737 
2738 			if (dco < dco_min_freq || dco > dco_max_freq)
2739 				continue;
2740 
2741 			if (div2 >= 2) {
2742 				/*
2743 				 * Note: a_divratio not matching TGL BSpec
2744 				 * algorithm but matching hardcoded values and
2745 				 * working on HW for DP alt-mode at least
2746 				 */
2747 				a_divratio = is_dp ? 10 : 5;
2748 				tlinedrv = is_dkl ? 1 : 2;
2749 			} else {
2750 				a_divratio = 5;
2751 				tlinedrv = 0;
2752 			}
2753 			inputsel = is_dp ? 0 : 1;
2754 
2755 			switch (div1) {
2756 			default:
2757 				MISSING_CASE(div1);
2758 				fallthrough;
2759 			case 2:
2760 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2761 				break;
2762 			case 3:
2763 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2764 				break;
2765 			case 5:
2766 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2767 				break;
2768 			case 7:
2769 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2770 				break;
2771 			}
2772 
2773 			*target_dco_khz = dco;
2774 
2775 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2776 
2777 			state->mg_clktop2_coreclkctl1 =
2778 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2779 
2780 			state->mg_clktop2_hsclkctl =
2781 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2782 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2783 				hsdiv |
2784 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2785 
2786 			return true;
2787 		}
2788 	}
2789 
2790 	return false;
2791 }
2792 
2793 /*
2794  * The specification for this function uses real numbers, so the math had to be
2795  * adapted to integer-only calculation, that's why it looks so different.
2796  */
2797 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2798 				  struct intel_dpll_hw_state *pll_state)
2799 {
2800 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2801 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
2802 	int clock = crtc_state->port_clock;
2803 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2804 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2805 	u32 prop_coeff, int_coeff;
2806 	u32 tdc_targetcnt, feedfwgain;
2807 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2808 	u64 tmp;
2809 	bool use_ssc = false;
2810 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2811 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
2812 
2813 	memset(pll_state, 0, sizeof(*pll_state));
2814 
2815 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2816 				      pll_state, is_dkl)) {
2817 		drm_dbg_kms(&dev_priv->drm,
2818 			    "Failed to find divisors for clock %d\n", clock);
2819 		return false;
2820 	}
2821 
2822 	m1div = 2;
2823 	m2div_int = dco_khz / (refclk_khz * m1div);
2824 	if (m2div_int > 255) {
2825 		if (!is_dkl) {
2826 			m1div = 4;
2827 			m2div_int = dco_khz / (refclk_khz * m1div);
2828 		}
2829 
2830 		if (m2div_int > 255) {
2831 			drm_dbg_kms(&dev_priv->drm,
2832 				    "Failed to find mdiv for clock %d\n",
2833 				    clock);
2834 			return false;
2835 		}
2836 	}
2837 	m2div_rem = dco_khz % (refclk_khz * m1div);
2838 
2839 	tmp = (u64)m2div_rem * (1 << 22);
2840 	do_div(tmp, refclk_khz * m1div);
2841 	m2div_frac = tmp;
2842 
2843 	switch (refclk_khz) {
2844 	case 19200:
2845 		iref_ndiv = 1;
2846 		iref_trim = 28;
2847 		iref_pulse_w = 1;
2848 		break;
2849 	case 24000:
2850 		iref_ndiv = 1;
2851 		iref_trim = 25;
2852 		iref_pulse_w = 2;
2853 		break;
2854 	case 38400:
2855 		iref_ndiv = 2;
2856 		iref_trim = 28;
2857 		iref_pulse_w = 1;
2858 		break;
2859 	default:
2860 		MISSING_CASE(refclk_khz);
2861 		return false;
2862 	}
2863 
2864 	/*
2865 	 * tdc_res = 0.000003
2866 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2867 	 *
2868 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2869 	 * was supposed to be a division, but we rearranged the operations of
2870 	 * the formula to avoid early divisions so we don't multiply the
2871 	 * rounding errors.
2872 	 *
2873 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2874 	 * we also rearrange to work with integers.
2875 	 *
2876 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2877 	 * last division by 10.
2878 	 */
2879 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2880 
2881 	/*
2882 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2883 	 * 32 bits. That's not a problem since we round the division down
2884 	 * anyway.
2885 	 */
2886 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2887 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2888 
2889 	if (dco_khz >= 9000000) {
2890 		prop_coeff = 5;
2891 		int_coeff = 10;
2892 	} else {
2893 		prop_coeff = 4;
2894 		int_coeff = 8;
2895 	}
2896 
2897 	if (use_ssc) {
2898 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2899 		do_div(tmp, refclk_khz * m1div * 10000);
2900 		ssc_stepsize = tmp;
2901 
2902 		tmp = mul_u32_u32(dco_khz, 1000);
2903 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2904 	} else {
2905 		ssc_stepsize = 0;
2906 		ssc_steplen = 0;
2907 	}
2908 	ssc_steplog = 4;
2909 
2910 	/* write pll_state calculations */
2911 	if (is_dkl) {
2912 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2913 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2914 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2915 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2916 
2917 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2918 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2919 
2920 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2921 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2922 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2923 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2924 
2925 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2926 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2927 
2928 		pll_state->mg_pll_tdc_coldst_bias =
2929 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2930 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2931 
2932 	} else {
2933 		pll_state->mg_pll_div0 =
2934 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2935 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2936 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2937 
2938 		pll_state->mg_pll_div1 =
2939 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2940 			MG_PLL_DIV1_DITHER_DIV_2 |
2941 			MG_PLL_DIV1_NDIVRATIO(1) |
2942 			MG_PLL_DIV1_FBPREDIV(m1div);
2943 
2944 		pll_state->mg_pll_lf =
2945 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2946 			MG_PLL_LF_AFCCNTSEL_512 |
2947 			MG_PLL_LF_GAINCTRL(1) |
2948 			MG_PLL_LF_INT_COEFF(int_coeff) |
2949 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2950 
2951 		pll_state->mg_pll_frac_lock =
2952 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2953 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2954 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2955 			MG_PLL_FRAC_LOCK_DCODITHEREN |
2956 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2957 		if (use_ssc || m2div_rem > 0)
2958 			pll_state->mg_pll_frac_lock |=
2959 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2960 
2961 		pll_state->mg_pll_ssc =
2962 			(use_ssc ? MG_PLL_SSC_EN : 0) |
2963 			MG_PLL_SSC_TYPE(2) |
2964 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2965 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
2966 			MG_PLL_SSC_FLLEN |
2967 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2968 
2969 		pll_state->mg_pll_tdc_coldst_bias =
2970 			MG_PLL_TDC_COLDST_COLDSTART |
2971 			MG_PLL_TDC_COLDST_IREFINT_EN |
2972 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2973 			MG_PLL_TDC_TDCOVCCORR_EN |
2974 			MG_PLL_TDC_TDCSEL(3);
2975 
2976 		pll_state->mg_pll_bias =
2977 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
2978 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2979 			MG_PLL_BIAS_BIAS_BONUS(10) |
2980 			MG_PLL_BIAS_BIASCAL_EN |
2981 			MG_PLL_BIAS_CTRIM(12) |
2982 			MG_PLL_BIAS_VREF_RDAC(4) |
2983 			MG_PLL_BIAS_IREFTRIM(iref_trim);
2984 
2985 		if (refclk_khz == 38400) {
2986 			pll_state->mg_pll_tdc_coldst_bias_mask =
2987 				MG_PLL_TDC_COLDST_COLDSTART;
2988 			pll_state->mg_pll_bias_mask = 0;
2989 		} else {
2990 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2991 			pll_state->mg_pll_bias_mask = -1U;
2992 		}
2993 
2994 		pll_state->mg_pll_tdc_coldst_bias &=
2995 			pll_state->mg_pll_tdc_coldst_bias_mask;
2996 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2997 	}
2998 
2999 	return true;
3000 }
3001 
/*
 * Reconstruct the port clock from the saved MG (pre-TGL) or DKL (TGL+)
 * PHY PLL register state.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* Feedback divider fields live in different registers on DKL vs MG. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the hsdiv ratio written by icl_mg_pll_find_divisors(). */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3073 
3074 /**
3075  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3076  * @crtc_state: state for the CRTC to select the DPLL for
3077  * @port_dpll_id: the active @port_dpll_id to select
3078  *
3079  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3080  * CRTC.
3081  */
3082 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3083 			      enum icl_port_dpll_id port_dpll_id)
3084 {
3085 	struct icl_port_dpll *port_dpll =
3086 		&crtc_state->icl_port_dplls[port_dpll_id];
3087 
3088 	crtc_state->shared_dpll = port_dpll->pll;
3089 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3090 }
3091 
3092 static void icl_update_active_dpll(struct intel_atomic_state *state,
3093 				   struct intel_crtc *crtc,
3094 				   struct intel_encoder *encoder)
3095 {
3096 	struct intel_crtc_state *crtc_state =
3097 		intel_atomic_get_new_crtc_state(state, crtc);
3098 	struct intel_digital_port *primary_port;
3099 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3100 
3101 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3102 		enc_to_mst(encoder)->primary :
3103 		enc_to_dig_port(encoder);
3104 
3105 	if (primary_port &&
3106 	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
3107 	     intel_tc_port_in_legacy_mode(primary_port)))
3108 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3109 
3110 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3111 }
3112 
3113 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3114 {
3115 	if (!(i915->hti_state & HDPORT_ENABLED))
3116 		return 0;
3117 
3118 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3119 }
3120 
/*
 * Reserve a combo PHY DPLL for the given CRTC/encoder.
 *
 * Computes the PLL parameters for the output type, then looks for a free
 * (or matching, shareable) DPLL from the per-platform pool, excluding any
 * DPLLs reserved by HTI. Returns false if parameters could not be computed
 * or no suitable DPLL was found.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI use the WRPLL algorithm, DP uses fixed link-rate tables. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* Build the per-platform pool of candidate DPLLs. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		/* On DG1 ports C/D and ports A/B use disjoint DPLL pairs. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		/* On JSL/EHL, DPLL4 is available to all ports except A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3200 
/*
 * Reserve the DPLLs needed by a Type-C PHY port: both the TBT PLL (default
 * instance) and the MG/DKL PHY PLL for this TC port are reserved, since the
 * active instance may need to switch at runtime. On any failure after the
 * TBT PLL was reserved, its reference is dropped before returning.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First reserve the TBT PLL as the default instance. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then reserve the MG/DKL PHY PLL dedicated to this TC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Undo the TBT PLL reservation taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3261 
3262 static bool icl_get_dplls(struct intel_atomic_state *state,
3263 			  struct intel_crtc *crtc,
3264 			  struct intel_encoder *encoder)
3265 {
3266 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3267 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3268 
3269 	if (intel_phy_is_combo(dev_priv, phy))
3270 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3271 	else if (intel_phy_is_tc(dev_priv, phy))
3272 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3273 
3274 	MISSING_CASE(phy);
3275 
3276 	return false;
3277 }
3278 
3279 static void icl_put_dplls(struct intel_atomic_state *state,
3280 			  struct intel_crtc *crtc)
3281 {
3282 	const struct intel_crtc_state *old_crtc_state =
3283 		intel_atomic_get_old_crtc_state(state, crtc);
3284 	struct intel_crtc_state *new_crtc_state =
3285 		intel_atomic_get_new_crtc_state(state, crtc);
3286 	enum icl_port_dpll_id id;
3287 
3288 	new_crtc_state->shared_dpll = NULL;
3289 
3290 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3291 		const struct icl_port_dpll *old_port_dpll =
3292 			&old_crtc_state->icl_port_dplls[id];
3293 		struct icl_port_dpll *new_port_dpll =
3294 			&new_crtc_state->icl_port_dplls[id];
3295 
3296 		new_port_dpll->pll = NULL;
3297 
3298 		if (!old_port_dpll->pll)
3299 			continue;
3300 
3301 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3302 	}
3303 }
3304 
/*
 * Read out the HW state of an MG PHY PLL (ICL Type-C).
 *
 * Returns false if the display core power well could not be grabbed or the
 * PLL is disabled. Read values are masked down to the fields the driver
 * programs, so they can be compared against the computed state; the bias /
 * TDC coldstart masks depend on the reference clock, mirroring
 * icl_calc_mg_pll_state().
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Refclk-dependent masks, must match icl_calc_mg_pll_state(). */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3370 
/*
 * Read out the HW state of a Dekel (DKL) PHY PLL (TGL+ Type-C).
 *
 * Returns false if the display core power well could not be grabbed or the
 * PLL is disabled. Each read value is masked down to the fields the driver
 * programs so the readout matches the computed state.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3444 
/*
 * Common HW state readout for combo/TBT PLLs: read the per-platform
 * CFGCR0/CFGCR1 registers if the PLL is enabled.
 *
 * Returns false if the display core power well could not be grabbed or the
 * PLL is disabled. Note the platform check order matters: ADL-S/DG1/RKL must
 * be tested before the generic DISPLAY_VER >= 12 (TGL) branch.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* On JSL/EHL, DPLL4 uses the register slot of ICL PLL 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3499 
3500 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3501 				   struct intel_shared_dpll *pll,
3502 				   struct intel_dpll_hw_state *hw_state)
3503 {
3504 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3505 
3506 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3507 }
3508 
/* TBT PLL readout: common path with the fixed TBT enable register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3515 
/*
 * Program the CFGCR0/CFGCR1 registers of a combo/TBT PLL from the cached SW
 * state. The per-platform register lookup must mirror icl_pll_get_hw_state():
 * ADL-S/DG1/RKL are tested before the generic DISPLAY_VER >= 12 branch.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* On JSL/EHL, DPLL4 uses the register slot of ICL PLL 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* Posting read flushes the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3549 
/* Program the MG PHY PLL registers (ICL Type-C) from the cached SW state. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Refclk-dependent masks, see icl_calc_mg_pll_state(). */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read flushes the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3600 
/* Program the Dekel PHY PLL registers (TGL+ Type-C) from the cached SW state. */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read flushes the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3670 
3671 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3672 				 struct intel_shared_dpll *pll,
3673 				 i915_reg_t enable_reg)
3674 {
3675 	u32 val;
3676 
3677 	val = intel_de_read(dev_priv, enable_reg);
3678 	val |= PLL_POWER_ENABLE;
3679 	intel_de_write(dev_priv, enable_reg, val);
3680 
3681 	/*
3682 	 * The spec says we need to "wait" but it also says it should be
3683 	 * immediate.
3684 	 */
3685 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3686 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
3687 			pll->info->id);
3688 }
3689 
3690 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3691 			   struct intel_shared_dpll *pll,
3692 			   i915_reg_t enable_reg)
3693 {
3694 	u32 val;
3695 
3696 	val = intel_de_read(dev_priv, enable_reg);
3697 	val |= PLL_ENABLE;
3698 	intel_de_write(dev_priv, enable_reg, val);
3699 
3700 	/* Timeout is actually 600us. */
3701 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3702 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
3703 }
3704 
/* Disable CMTG clock gating on ADL-P A0; applies only while DPLL0 enables. */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only ADL-P display stepping A0 with DPLL0 needs the workaround. */
	if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN); /* intentional double read, see above */
	intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}
3729 
/*
 * Enable a combo PHY PLL: power up, program the dividers, enable and wait
 * for lock, then apply the ADL-P CMTG workaround. The step order is fixed
 * by the HW enable sequence.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}
3763 
/* Enable the TBT PLL: power up, program dividers, enable and wait for lock. */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
3781 
/*
 * Enable a Type-C PHY PLL: power up, program the MG (ICL) or DKL (TGL+)
 * register set, then enable and wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* TGL+ Type-C ports use Dekel PHYs, ICL uses MG PHYs. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3804 
/*
 * Common PLL disable sequence: clear the enable bit and wait for the lock
 * to drop, then power down the PLL and wait for the power state to clear.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
3841 
3842 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3843 			      struct intel_shared_dpll *pll)
3844 {
3845 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3846 
3847 	icl_pll_disable(dev_priv, pll, enable_reg);
3848 
3849 	if (IS_JSL_EHL(dev_priv) &&
3850 	    pll->info->id == DPLL_ID_EHL_DPLL4)
3851 		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3852 					pll->wakeref);
3853 }
3854 
/* Disable the TBT PLL via the common sequence with its fixed enable reg. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
3860 
/* Disable a Type-C PHY PLL; only the enable register differs per PLL. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
3868 
/* Cache the DPLL reference clock: ICL+ uses the (non-SSC) cdclk reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
3874 
3875 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3876 			      const struct intel_dpll_hw_state *hw_state)
3877 {
3878 	drm_dbg_kms(&dev_priv->drm,
3879 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3880 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3881 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3882 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3883 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3884 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3885 		    hw_state->cfgcr0, hw_state->cfgcr1,
3886 		    hw_state->mg_refclkin_ctl,
3887 		    hw_state->mg_clktop2_coreclkctl1,
3888 		    hw_state->mg_clktop2_hsclkctl,
3889 		    hw_state->mg_pll_div0,
3890 		    hw_state->mg_pll_div1,
3891 		    hw_state->mg_pll_lf,
3892 		    hw_state->mg_pll_frac_lock,
3893 		    hw_state->mg_pll_ssc,
3894 		    hw_state->mg_pll_bias,
3895 		    hw_state->mg_pll_tdc_coldst_bias);
3896 }
3897 
/* Ops for combo PHY DPLLs (ICL+). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
3904 
/* Ops for the Thunderbolt PLL used by Type-C ports in TBT-alt mode. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
3911 
/* Ops for ICL MG PHY DPLLs (Type-C ports). */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3918 
/* ICL DPLL pool: two combo DPLLs, the TBT PLL and four MG PHY PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
3929 
/* PLL manager for Ice Lake. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3938 
/* Elkhart/Jasper Lake: combo PLLs only, no Type-C PLLs. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },	/* terminator */
};
3945 
/* PLL manager for Elkhart/Jasper Lake (no Type-C, so no active-PLL hook). */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3953 
/*
 * Hooks for the TGL+ Type-C Dekel PHY PLLs; enable/disable and frequency
 * calculation are shared with the ICL MG PLLs, only state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
3960 
/*
 * Tiger Lake: the Type-C (Dekel) PLLs reuse the ICL MG PLL IDs for the
 * first four ports, with two extra TGL-specific IDs for ports 5 and 6.
 */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },	/* terminator */
};
3973 
/* PLL manager for Tiger Lake. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3982 
/* Rocket Lake: combo PLLs only (same set of IDs as EHL). */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },	/* terminator */
};
3989 
/* PLL manager for Rocket Lake. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
3997 
/* DG1: four combo PLLs with DG1-specific IDs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },	/* terminator */
};
4005 
/* PLL manager for DG1. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4013 
/* Alder Lake S: four combo PLLs, mixing ICL and DG1 IDs. */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },	/* terminator */
};
4021 
/* PLL manager for Alder Lake S. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4029 
/* Alder Lake P: two combo PLLs, TBT PLL and four Type-C (Dekel) PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },	/* terminator */
};
4040 
/* PLL manager for Alder Lake P. */
static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4049 
4050 /**
4051  * intel_shared_dpll_init - Initialize shared DPLLs
4052  * @dev: drm device
4053  *
4054  * Initialize shared DPLLs for @dev.
4055  */
4056 void intel_shared_dpll_init(struct drm_device *dev)
4057 {
4058 	struct drm_i915_private *dev_priv = to_i915(dev);
4059 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4060 	const struct dpll_info *dpll_info;
4061 	int i;
4062 
4063 	if (IS_DG2(dev_priv))
4064 		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
4065 		dpll_mgr = NULL;
4066 	else if (IS_ALDERLAKE_P(dev_priv))
4067 		dpll_mgr = &adlp_pll_mgr;
4068 	else if (IS_ALDERLAKE_S(dev_priv))
4069 		dpll_mgr = &adls_pll_mgr;
4070 	else if (IS_DG1(dev_priv))
4071 		dpll_mgr = &dg1_pll_mgr;
4072 	else if (IS_ROCKETLAKE(dev_priv))
4073 		dpll_mgr = &rkl_pll_mgr;
4074 	else if (DISPLAY_VER(dev_priv) >= 12)
4075 		dpll_mgr = &tgl_pll_mgr;
4076 	else if (IS_JSL_EHL(dev_priv))
4077 		dpll_mgr = &ehl_pll_mgr;
4078 	else if (DISPLAY_VER(dev_priv) >= 11)
4079 		dpll_mgr = &icl_pll_mgr;
4080 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4081 		dpll_mgr = &bxt_pll_mgr;
4082 	else if (DISPLAY_VER(dev_priv) == 9)
4083 		dpll_mgr = &skl_pll_mgr;
4084 	else if (HAS_DDI(dev_priv))
4085 		dpll_mgr = &hsw_pll_mgr;
4086 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4087 		dpll_mgr = &pch_pll_mgr;
4088 
4089 	if (!dpll_mgr) {
4090 		dev_priv->dpll.num_shared_dpll = 0;
4091 		return;
4092 	}
4093 
4094 	dpll_info = dpll_mgr->dpll_info;
4095 
4096 	for (i = 0; dpll_info[i].name; i++) {
4097 		drm_WARN_ON(dev, i != dpll_info[i].id);
4098 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4099 	}
4100 
4101 	dev_priv->dpll.mgr = dpll_mgr;
4102 	dev_priv->dpll.num_shared_dpll = i;
4103 	mutex_init(&dev_priv->dpll.lock);
4104 
4105 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4106 }
4107 
4108 /**
4109  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4110  * @state: atomic state
4111  * @crtc: CRTC to reserve DPLLs for
4112  * @encoder: encoder
4113  *
4114  * This function reserves all required DPLLs for the given CRTC and encoder
4115  * combination in the current atomic commit @state and the new @crtc atomic
4116  * state.
4117  *
4118  * The new configuration in the atomic commit @state is made effective by
4119  * calling intel_shared_dpll_swap_state().
4120  *
4121  * The reserved DPLLs should be released by calling
4122  * intel_release_shared_dplls().
4123  *
4124  * Returns:
4125  * True if all required DPLLs were successfully reserved.
4126  */
4127 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4128 				struct intel_crtc *crtc,
4129 				struct intel_encoder *encoder)
4130 {
4131 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4132 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4133 
4134 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4135 		return false;
4136 
4137 	return dpll_mgr->get_dplls(state, crtc, encoder);
4138 }
4139 
4140 /**
4141  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4142  * @state: atomic state
4143  * @crtc: crtc from which the DPLLs are to be released
4144  *
4145  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4146  * from the current atomic commit @state and the old @crtc atomic state.
4147  *
4148  * The new configuration in the atomic commit @state is made effective by
4149  * calling intel_shared_dpll_swap_state().
4150  */
4151 void intel_release_shared_dplls(struct intel_atomic_state *state,
4152 				struct intel_crtc *crtc)
4153 {
4154 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4155 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4156 
4157 	/*
4158 	 * FIXME: this function is called for every platform having a
4159 	 * compute_clock hook, even though the platform doesn't yet support
4160 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4161 	 * called on those.
4162 	 */
4163 	if (!dpll_mgr)
4164 		return;
4165 
4166 	dpll_mgr->put_dplls(state, crtc);
4167 }
4168 
4169 /**
4170  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4171  * @state: atomic state
4172  * @crtc: the CRTC for which to update the active DPLL
4173  * @encoder: encoder determining the type of port DPLL
4174  *
4175  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4176  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4177  * DPLL selected will be based on the current mode of the encoder's port.
4178  */
4179 void intel_update_active_dpll(struct intel_atomic_state *state,
4180 			      struct intel_crtc *crtc,
4181 			      struct intel_encoder *encoder)
4182 {
4183 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4184 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4185 
4186 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4187 		return;
4188 
4189 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4190 }
4191 
4192 /**
4193  * intel_dpll_get_freq - calculate the DPLL's output frequency
4194  * @i915: i915 device
4195  * @pll: DPLL for which to calculate the output frequency
4196  * @pll_state: DPLL state from which to calculate the output frequency
4197  *
4198  * Return the output frequency corresponding to @pll's passed in @pll_state.
4199  */
4200 int intel_dpll_get_freq(struct drm_i915_private *i915,
4201 			const struct intel_shared_dpll *pll,
4202 			const struct intel_dpll_hw_state *pll_state)
4203 {
4204 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4205 		return 0;
4206 
4207 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4208 }
4209 
4210 /**
4211  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4212  * @i915: i915 device
4213  * @pll: DPLL for which to calculate the output frequency
4214  * @hw_state: DPLL's hardware state
4215  *
4216  * Read out @pll's hardware state into @hw_state.
4217  */
4218 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4219 			     struct intel_shared_dpll *pll,
4220 			     struct intel_dpll_hw_state *hw_state)
4221 {
4222 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4223 }
4224 
4225 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4226 				  struct intel_shared_dpll *pll)
4227 {
4228 	struct intel_crtc *crtc;
4229 
4230 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4231 
4232 	if (IS_JSL_EHL(i915) && pll->on &&
4233 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4234 		pll->wakeref = intel_display_power_get(i915,
4235 						       POWER_DOMAIN_DC_OFF);
4236 	}
4237 
4238 	pll->state.pipe_mask = 0;
4239 	for_each_intel_crtc(&i915->drm, crtc) {
4240 		struct intel_crtc_state *crtc_state =
4241 			to_intel_crtc_state(crtc->base.state);
4242 
4243 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4244 			pll->state.pipe_mask |= BIT(crtc->pipe);
4245 	}
4246 	pll->active_mask = pll->state.pipe_mask;
4247 
4248 	drm_dbg_kms(&i915->drm,
4249 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4250 		    pll->info->name, pll->state.pipe_mask, pll->on);
4251 }
4252 
4253 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4254 {
4255 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4256 		i915->dpll.mgr->update_ref_clks(i915);
4257 }
4258 
4259 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4260 {
4261 	int i;
4262 
4263 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4264 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4265 }
4266 
4267 static void sanitize_dpll_state(struct drm_i915_private *i915,
4268 				struct intel_shared_dpll *pll)
4269 {
4270 	if (!pll->on)
4271 		return;
4272 
4273 	adlp_cmtg_clock_gating_wa(i915, pll);
4274 
4275 	if (pll->active_mask)
4276 		return;
4277 
4278 	drm_dbg_kms(&i915->drm,
4279 		    "%s enabled but not in use, disabling\n",
4280 		    pll->info->name);
4281 
4282 	pll->info->funcs->disable(i915, pll);
4283 	pll->on = false;
4284 }
4285 
4286 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4287 {
4288 	int i;
4289 
4290 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4291 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4292 }
4293 
4294 /**
4295  * intel_dpll_dump_hw_state - write hw_state to dmesg
4296  * @dev_priv: i915 drm device
4297  * @hw_state: hw state to be written to the log
4298  *
4299  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4300  */
4301 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4302 			      const struct intel_dpll_hw_state *hw_state)
4303 {
4304 	if (dev_priv->dpll.mgr) {
4305 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4306 	} else {
4307 		/* fallback for platforms that don't use the shared dpll
4308 		 * infrastructure
4309 		 */
4310 		drm_dbg_kms(&dev_priv->drm,
4311 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4312 			    "fp0: 0x%x, fp1: 0x%x\n",
4313 			    hw_state->dpll,
4314 			    hw_state->dpll_md,
4315 			    hw_state->fp0,
4316 			    hw_state->fp1);
4317 	}
4318 }
4319