1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
39  * modset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
47 
struct intel_dpll_mgr {
	/* Platform's PLL table; terminated by a zeroed sentinel entry. */
	const struct dpll_info *dpll_info;

	/* Reserve the PLL(s) needed by @crtc/@encoder in the atomic state. */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* Release the PLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Optional: update which reserved PLL is active for @crtc. */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Optional: refresh the cached reference clock frequencies. */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Log @hw_state for debugging. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
63 
64 static void
65 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
66 				  struct intel_shared_dpll_state *shared_dpll)
67 {
68 	enum intel_dpll_id i;
69 
70 	/* Copy shared dpll state */
71 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
72 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
73 
74 		shared_dpll[i] = pll->state;
75 	}
76 }
77 
78 static struct intel_shared_dpll_state *
79 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
80 {
81 	struct intel_atomic_state *state = to_intel_atomic_state(s);
82 
83 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
84 
85 	if (!state->dpll_set) {
86 		state->dpll_set = true;
87 
88 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
89 						  state->shared_dpll);
90 	}
91 
92 	return state->shared_dpll;
93 }
94 
95 /**
96  * intel_get_shared_dpll_by_id - get a DPLL given its id
97  * @dev_priv: i915 device instance
98  * @id: pll id
99  *
100  * Returns:
101  * A pointer to the DPLL with @id
102  */
103 struct intel_shared_dpll *
104 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
105 			    enum intel_dpll_id id)
106 {
107 	return &dev_priv->dpll.shared_dplls[id];
108 }
109 
110 /**
111  * intel_get_shared_dpll_id - get the id of a DPLL
112  * @dev_priv: i915 device instance
113  * @pll: the DPLL
114  *
115  * Returns:
116  * The id of @pll
117  */
118 enum intel_dpll_id
119 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
120 			 struct intel_shared_dpll *pll)
121 {
122 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
123 
124 	if (drm_WARN_ON(&dev_priv->drm,
125 			pll_idx < 0 ||
126 			pll_idx >= dev_priv->dpll.num_shared_dpll))
127 		return -1;
128 
129 	return pll_idx;
130 }
131 
/* For ILK+ */
/* Warn unless the PLL's actual HW enable state matches the expected @state. */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Asserting on a NULL PLL is a caller bug; warn and bail out. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* Read back the enable state from the hardware and compare. */
	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
149 
150 static i915_reg_t
151 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
152 			   struct intel_shared_dpll *pll)
153 {
154 	if (IS_DG1(i915))
155 		return DG1_DPLL_ENABLE(pll->info->id);
156 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
157 		return MG_PLL_ENABLE(0);
158 
159 	return CNL_DPLL_ENABLE(pll->info->id);
160 }
161 
162 /**
163  * intel_prepare_shared_dpll - call a dpll's prepare hook
164  * @crtc_state: CRTC, and its state, which has a shared dpll
165  *
166  * This calls the PLL's prepare hook if it has one and if the PLL is not
167  * already enabled. The prepare hook is platform specific.
168  */
169 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
170 {
171 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
172 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
173 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
174 
175 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
176 		return;
177 
178 	mutex_lock(&dev_priv->dpll.lock);
179 	drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
180 	if (!pll->active_mask) {
181 		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
182 		drm_WARN_ON(&dev_priv->drm, pll->on);
183 		assert_shared_dpll_disabled(dev_priv, pll);
184 
185 		pll->info->funcs->prepare(dev_priv, pll);
186 	}
187 	mutex_unlock(&dev_priv->dpll.lock);
188 }
189 
190 /**
191  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
192  * @crtc_state: CRTC, and its state, which has a shared DPLL
193  *
194  * Enable the shared DPLL used by @crtc.
195  */
196 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
197 {
198 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
199 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
200 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
201 	unsigned int pipe_mask = BIT(crtc->pipe);
202 	unsigned int old_mask;
203 
204 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
205 		return;
206 
207 	mutex_lock(&dev_priv->dpll.lock);
208 	old_mask = pll->active_mask;
209 
210 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
211 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
212 		goto out;
213 
214 	pll->active_mask |= pipe_mask;
215 
216 	drm_dbg_kms(&dev_priv->drm,
217 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
218 		    pll->info->name, pll->active_mask, pll->on,
219 		    crtc->base.base.id, crtc->base.name);
220 
221 	if (old_mask) {
222 		drm_WARN_ON(&dev_priv->drm, !pll->on);
223 		assert_shared_dpll_enabled(dev_priv, pll);
224 		goto out;
225 	}
226 	drm_WARN_ON(&dev_priv->drm, pll->on);
227 
228 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
229 	pll->info->funcs->enable(dev_priv, pll);
230 	pll->on = true;
231 
232 out:
233 	mutex_unlock(&dev_priv->dpll.lock);
234 }
235 
236 /**
237  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
238  * @crtc_state: CRTC, and its state, which has a shared DPLL
239  *
240  * Disable the shared DPLL used by @crtc.
241  */
242 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
243 {
244 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
245 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
246 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
247 	unsigned int pipe_mask = BIT(crtc->pipe);
248 
249 	/* PCH only available on ILK+ */
250 	if (DISPLAY_VER(dev_priv) < 5)
251 		return;
252 
253 	if (pll == NULL)
254 		return;
255 
256 	mutex_lock(&dev_priv->dpll.lock);
257 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
258 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
259 		     crtc->base.base.id, crtc->base.name))
260 		goto out;
261 
262 	drm_dbg_kms(&dev_priv->drm,
263 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
264 		    pll->info->name, pll->active_mask, pll->on,
265 		    crtc->base.base.id, crtc->base.name);
266 
267 	assert_shared_dpll_enabled(dev_priv, pll);
268 	drm_WARN_ON(&dev_priv->drm, !pll->on);
269 
270 	pll->active_mask &= ~pipe_mask;
271 	if (pll->active_mask)
272 		goto out;
273 
274 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
275 	pll->info->funcs->disable(dev_priv, pll);
276 	pll->on = false;
277 
278 out:
279 	mutex_unlock(&dev_priv->dpll.lock);
280 }
281 
/*
 * Find a PLL in @dpll_mask for @crtc: prefer one whose staged HW state
 * matches @pll_state exactly (allowing sharing), otherwise fall back to
 * the first unused PLL. Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* Identical HW state means this PLL can be shared. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
330 
331 static void
332 intel_reference_shared_dpll(struct intel_atomic_state *state,
333 			    const struct intel_crtc *crtc,
334 			    const struct intel_shared_dpll *pll,
335 			    const struct intel_dpll_hw_state *pll_state)
336 {
337 	struct drm_i915_private *i915 = to_i915(state->base.dev);
338 	struct intel_shared_dpll_state *shared_dpll;
339 	const enum intel_dpll_id id = pll->info->id;
340 
341 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
342 
343 	if (shared_dpll[id].pipe_mask == 0)
344 		shared_dpll[id].hw_state = *pll_state;
345 
346 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
347 		pipe_name(crtc->pipe));
348 
349 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
350 }
351 
352 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
353 					  const struct intel_crtc *crtc,
354 					  const struct intel_shared_dpll *pll)
355 {
356 	struct intel_shared_dpll_state *shared_dpll;
357 
358 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
359 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
360 }
361 
362 static void intel_put_dpll(struct intel_atomic_state *state,
363 			   struct intel_crtc *crtc)
364 {
365 	const struct intel_crtc_state *old_crtc_state =
366 		intel_atomic_get_old_crtc_state(state, crtc);
367 	struct intel_crtc_state *new_crtc_state =
368 		intel_atomic_get_new_crtc_state(state, crtc);
369 
370 	new_crtc_state->shared_dpll = NULL;
371 
372 	if (!old_crtc_state->shared_dpll)
373 		return;
374 
375 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
376 }
377 
378 /**
379  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
380  * @state: atomic state
381  *
382  * This is the dpll version of drm_atomic_helper_swap_state() since the
383  * helper does not handle driver-specific global state.
384  *
385  * For consistency with atomic helpers this function does a complete swap,
386  * i.e. it also puts the current state into @state, even though there is no
387  * need for that at this moment.
388  */
389 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
390 {
391 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
392 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
393 	enum intel_dpll_id i;
394 
395 	if (!state->dpll_set)
396 		return;
397 
398 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
399 		struct intel_shared_dpll *pll =
400 			&dev_priv->dpll.shared_dplls[i];
401 
402 		swap(pll->state, shared_dpll[i]);
403 	}
404 }
405 
/* Read the PCH DPLL registers into @hw_state; returns whether the PLL is on. */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while the display power is enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	/* Enabled iff the VCO enable bit is set in the control register. */
	return val & DPLL_VCO_ENABLE;
}
428 
/* Program the FP0/FP1 divisor registers ahead of enabling the PLL. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
437 
/* Warn unless the PCH reference clock is enabled (IBX/CPT only). */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* Only IBX/CPT PCHs have this reference clock arrangement. */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	/* Any of the three source selects being set counts as enabled. */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
450 
/* Enable a PCH DPLL, following the required write/wait/rewrite sequence. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
474 
/* Disable a PCH DPLL by clearing its control register, then settle. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	/* Flush the write, then give the PLL time to shut down. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
484 
/*
 * Reserve a PCH DPLL for @crtc: IBX has a fixed per-pipe mapping, later
 * PCHs pick any matching/free PLL. Returns false if none is available.
 */
static bool ibx_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return false;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
522 
523 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
524 			      const struct intel_dpll_hw_state *hw_state)
525 {
526 	drm_dbg_kms(&dev_priv->drm,
527 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
528 		    "fp0: 0x%x, fp1: 0x%x\n",
529 		    hw_state->dpll,
530 		    hw_state->dpll_md,
531 		    hw_state->fp0,
532 		    hw_state->fp1);
533 }
534 
/* Hardware hooks for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs; the table ends with a zeroed sentinel entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
554 
/* Program and enable the WRPLL, then wait 20us for it to settle. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
564 
/* Program and enable the SPLL, then wait 20us for it to settle. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
572 
/* Clear the WRPLL enable bit, then reinit the PCH refclk if it used SSC. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
590 
/* Clear the SPLL enable bit, then reinit the PCH refclk if it used SSC. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
608 
/* Read WRPLL_CTL into @hw_state; returns whether the WRPLL is enabled. */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while the display power is enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
629 
/* Read SPLL_CTL into @hw_state; returns whether the SPLL is enabled. */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while the display power is enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
649 
/* LC PLL reference: 2700 MHz, also expressed in the "2k" units of freq2k. */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider (p) search range and step. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate (p, n2, r2) divider triplet for the WRPLL. */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
666 
/*
 * Return the WRPLL error budget (in ppm, see hsw_wrpll_update_rnp())
 * for the given pixel clock in Hz. Known clocks get tuned budgets;
 * anything else defaults to 1000.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	switch (clock) {
	case 25175000:
	case 25200000:
	case 27000000:
	case 27027000:
	case 37762500:
	case 37800000:
	case 40500000:
	case 40541000:
	case 54000000:
	case 54054000:
	case 59341000:
	case 59400000:
	case 72000000:
	case 74176000:
	case 74250000:
	case 81000000:
	case 81081000:
	case 89012000:
	case 89100000:
	case 108000000:
	case 108108000:
	case 111264000:
	case 111375000:
	case 148352000:
	case 148500000:
	case 162000000:
	case 162162000:
	case 222525000:
	case 222750000:
	case 296703000:
	case 297000000:
		return 0;
	case 233500000:
	case 245250000:
	case 247750000:
	case 253250000:
	case 298000000:
		return 1500;
	case 169128000:
	case 169500000:
	case 179500000:
	case 202000000:
		return 2000;
	case 256250000:
	case 262500000:
	case 270000000:
	case 272500000:
	case 273750000:
	case 280750000:
	case 281250000:
	case 286000000:
	case 291750000:
		return 4000;
	case 267250000:
	case 268500000:
		return 5000;
	default:
		return 1000;
	}
}
740 
/*
 * Compare the candidate dividers (r2, n2, p) against the current *best and
 * take over *best if the candidate is preferable, per the budget rules
 * derived below. All comparisons are done with cross-multiplied integers
 * to avoid division.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
800 
/*
 * Exhaustively search for WRPLL dividers (*r2_out, *n2_out, *p_out) for the
 * given clock (in Hz), keeping the reference and VCO frequencies within the
 * REF_MIN/REF_MAX and VCO_MIN/VCO_MAX constraints and picking the best
 * candidate via hsw_wrpll_update_rnp().
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
865 
866 static struct intel_shared_dpll *
867 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
868 		       struct intel_crtc *crtc)
869 {
870 	struct intel_crtc_state *crtc_state =
871 		intel_atomic_get_new_crtc_state(state, crtc);
872 	struct intel_shared_dpll *pll;
873 	u32 val;
874 	unsigned int p, n2, r2;
875 
876 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
877 
878 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
879 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
880 	      WRPLL_DIVIDER_POST(p);
881 
882 	crtc_state->dpll_hw_state.wrpll = val;
883 
884 	pll = intel_find_shared_dpll(state, crtc,
885 				     &crtc_state->dpll_hw_state,
886 				     BIT(DPLL_ID_WRPLL2) |
887 				     BIT(DPLL_ID_WRPLL1));
888 
889 	if (!pll)
890 		return NULL;
891 
892 	return pll;
893 }
894 
/* Compute the WRPLL output frequency (in kHz) from its control value. */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	/* Pick the reference clock selected by the WRPLL_REF field. */
	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
934 
935 static struct intel_shared_dpll *
936 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
937 {
938 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
939 	struct intel_shared_dpll *pll;
940 	enum intel_dpll_id pll_id;
941 	int clock = crtc_state->port_clock;
942 
943 	switch (clock / 2) {
944 	case 81000:
945 		pll_id = DPLL_ID_LCPLL_810;
946 		break;
947 	case 135000:
948 		pll_id = DPLL_ID_LCPLL_1350;
949 		break;
950 	case 270000:
951 		pll_id = DPLL_ID_LCPLL_2700;
952 		break;
953 	default:
954 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
955 			    clock);
956 		return NULL;
957 	}
958 
959 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
960 
961 	if (!pll)
962 		return NULL;
963 
964 	return pll;
965 }
966 
967 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
968 				  const struct intel_shared_dpll *pll,
969 				  const struct intel_dpll_hw_state *pll_state)
970 {
971 	int link_clock = 0;
972 
973 	switch (pll->info->id) {
974 	case DPLL_ID_LCPLL_810:
975 		link_clock = 81000;
976 		break;
977 	case DPLL_ID_LCPLL_1350:
978 		link_clock = 135000;
979 		break;
980 	case DPLL_ID_LCPLL_2700:
981 		link_clock = 270000;
982 		break;
983 	default:
984 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
985 		break;
986 	}
987 
988 	return link_clock * 2;
989 }
990 
991 static struct intel_shared_dpll *
992 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
993 		      struct intel_crtc *crtc)
994 {
995 	struct intel_crtc_state *crtc_state =
996 		intel_atomic_get_new_crtc_state(state, crtc);
997 
998 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
999 		return NULL;
1000 
1001 	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
1002 					 SPLL_REF_MUXED_SSC;
1003 
1004 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1005 				      BIT(DPLL_ID_SPLL));
1006 }
1007 
1008 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1009 				 const struct intel_shared_dpll *pll,
1010 				 const struct intel_dpll_hw_state *pll_state)
1011 {
1012 	int link_clock = 0;
1013 
1014 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1015 	case SPLL_FREQ_810MHz:
1016 		link_clock = 81000;
1017 		break;
1018 	case SPLL_FREQ_1350MHz:
1019 		link_clock = 135000;
1020 		break;
1021 	case SPLL_FREQ_2700MHz:
1022 		link_clock = 270000;
1023 		break;
1024 	default:
1025 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1026 		break;
1027 	}
1028 
1029 	return link_clock * 2;
1030 }
1031 
1032 static bool hsw_get_dpll(struct intel_atomic_state *state,
1033 			 struct intel_crtc *crtc,
1034 			 struct intel_encoder *encoder)
1035 {
1036 	struct intel_crtc_state *crtc_state =
1037 		intel_atomic_get_new_crtc_state(state, crtc);
1038 	struct intel_shared_dpll *pll;
1039 
1040 	memset(&crtc_state->dpll_hw_state, 0,
1041 	       sizeof(crtc_state->dpll_hw_state));
1042 
1043 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1044 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1045 	else if (intel_crtc_has_dp_encoder(crtc_state))
1046 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1047 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1048 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1049 	else
1050 		return false;
1051 
1052 	if (!pll)
1053 		return false;
1054 
1055 	intel_reference_shared_dpll(state, crtc,
1056 				    pll, &crtc_state->dpll_hw_state);
1057 
1058 	crtc_state->shared_dpll = pll;
1059 
1060 	return true;
1061 }
1062 
1063 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1064 {
1065 	i915->dpll.ref_clks.ssc = 135000;
1066 	/* Non-SSC is only used on non-ULT HSW. */
1067 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1068 		i915->dpll.ref_clks.nssc = 24000;
1069 	else
1070 		i915->dpll.ref_clks.nssc = 135000;
1071 }
1072 
/* Log the cached HSW PLL state (WRPLL and SPLL control values). */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1079 
/* PLL vtable for the two HSW WRPLLs (HDMI outputs). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1086 
/* PLL vtable for the HSW SPLL (analog/CRT output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1093 
/*
 * The HSW LCPLLs are always on (see INTEL_DPLL_ALWAYS_ON in hsw_plls),
 * so there is nothing to do to enable them.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1098 
/* The always-on HSW LCPLLs are never disabled; intentional no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1103 
/*
 * The HSW LCPLLs are always enabled and have no per-PLL state to read
 * back, so simply report them as enabled.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1110 
/* PLL vtable for the three fixed-frequency HSW LCPLLs (DP outputs). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1117 
/*
 * HSW/BDW PLL inventory. The LCPLLs run at fixed frequencies and are
 * marked always-on; WRPLLs and the SPLL are enabled on demand.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1127 
/* Platform hooks tying the HSW/BDW PLLs into the shared-DPLL framework. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1135 
/* Per-PLL register set on SKL: control plus the two HDMI-mode config regs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1139 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1166 
/*
 * Program this PLL's slice of the shared DPLL_CTRL1 register (HDMI mode,
 * SSC and link rate fields) from the cached hw state. Only the bits
 * belonging to @pll are modified; other PLLs' fields are preserved.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* Each PLL owns a 6-bit field at bit position id * 6. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1183 
/*
 * Enable a SKL DPLL (1-3): program CTRL1 and the CFGCR dividers, set the
 * enable bit and wait for lock. The config registers must be written
 * before the PLL is enabled.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* Wait up to 5 ms for the PLL to report lock. */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1204 
/*
 * DPLL0 is enabled elsewhere (it also drives CDCLK); only its DPLL_CTRL1
 * link rate field needs programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1210 
/* Disable a SKL DPLL (1-3) by clearing its control register enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1222 
/* DPLL0 also drives CDCLK and is never disabled here; intentional no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1227 
/*
 * Read back the hardware state of a SKL DPLL (1-3) into @hw_state.
 * Returns false if display power is off or the PLL is disabled. The
 * CFGCR registers are only captured when HDMI mode is active, since
 * their contents are stale otherwise.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's 6-bit field from the shared DPLL_CTRL1. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1264 
/*
 * Read back DPLL0's state. Unlike DPLL1-3 it should always be enabled
 * (it drives CDCLK), so a disabled PLL here triggers a WARN. Only the
 * DPLL_CTRL1 field is captured; DPLL0 has no CFGCR registers.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1297 
/* Best WRPLL divider candidate found so far during the search loop. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1304 
1305 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1306 {
1307 	memset(ctx, 0, sizeof(*ctx));
1308 
1309 	ctx->min_deviation = U64_MAX;
1310 }
1311 
1312 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1313 #define SKL_DCO_MAX_PDEVIATION	100
1314 #define SKL_DCO_MAX_NDEVIATION	600
1315 
1316 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1317 				  u64 central_freq,
1318 				  u64 dco_freq,
1319 				  unsigned int divider)
1320 {
1321 	u64 deviation;
1322 
1323 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1324 			      central_freq);
1325 
1326 	/* positive deviation */
1327 	if (dco_freq >= central_freq) {
1328 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1329 		    deviation < ctx->min_deviation) {
1330 			ctx->min_deviation = deviation;
1331 			ctx->central_freq = central_freq;
1332 			ctx->dco_freq = dco_freq;
1333 			ctx->p = divider;
1334 		}
1335 	/* negative deviation */
1336 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1337 		   deviation < ctx->min_deviation) {
1338 		ctx->min_deviation = deviation;
1339 		ctx->central_freq = central_freq;
1340 		ctx->dco_freq = dco_freq;
1341 		ctx->p = divider;
1342 	}
1343 }
1344 
/*
 * Split the total divider @p into the P0 (pdiv), P1 (qdiv) and P2 (kdiv)
 * multipliers the hardware supports. Outputs are left untouched for
 * values of @p not in the even/odd divider candidate lists.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* Small halves map directly onto P2 with P0 = 2. */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* Otherwise factor the half across P0/P1 with P2 = 2. */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1393 
/* Raw field values destined for DPLL_CFGCR1/2 (see skl_ddi_hdmi_pll_dividers). */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional part of the DCO ratio */
	u32 dco_integer;	/* integer part of the DCO ratio */
	u32 qdiv_ratio;		/* P1 */
	u32 qdiv_mode;		/* 1 when qdiv_ratio != 1 */
	u32 kdiv;		/* encoded P2 */
	u32 pdiv;		/* encoded P0 */
	u32 central_freq;	/* encoded DCO central frequency selector */
};
1403 
1404 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1405 				      u64 afe_clock,
1406 				      int ref_clock,
1407 				      u64 central_freq,
1408 				      u32 p0, u32 p1, u32 p2)
1409 {
1410 	u64 dco_freq;
1411 
1412 	switch (central_freq) {
1413 	case 9600000000ULL:
1414 		params->central_freq = 0;
1415 		break;
1416 	case 9000000000ULL:
1417 		params->central_freq = 1;
1418 		break;
1419 	case 8400000000ULL:
1420 		params->central_freq = 3;
1421 	}
1422 
1423 	switch (p0) {
1424 	case 1:
1425 		params->pdiv = 0;
1426 		break;
1427 	case 2:
1428 		params->pdiv = 1;
1429 		break;
1430 	case 3:
1431 		params->pdiv = 2;
1432 		break;
1433 	case 7:
1434 		params->pdiv = 4;
1435 		break;
1436 	default:
1437 		WARN(1, "Incorrect PDiv\n");
1438 	}
1439 
1440 	switch (p2) {
1441 	case 5:
1442 		params->kdiv = 0;
1443 		break;
1444 	case 2:
1445 		params->kdiv = 1;
1446 		break;
1447 	case 3:
1448 		params->kdiv = 2;
1449 		break;
1450 	case 1:
1451 		params->kdiv = 3;
1452 		break;
1453 	default:
1454 		WARN(1, "Incorrect KDiv\n");
1455 	}
1456 
1457 	params->qdiv_ratio = p1;
1458 	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1459 
1460 	dco_freq = p0 * p1 * p2 * afe_clock;
1461 
1462 	/*
1463 	 * Intermediate values are in Hz.
1464 	 * Divide by MHz to match bsepc
1465 	 */
1466 	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
1467 	params->dco_fraction =
1468 		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
1469 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1470 }
1471 
/*
 * Find WRPLL parameters for the given pixel clock: try every candidate
 * divider against each DCO central frequency, keeping the candidate with
 * the smallest deviation (even dividers preferred over odd ones), then
 * encode the winner into @wrpll_params. Returns false if no divider
 * keeps the DCO within the allowed deviation band.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1545 
1546 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1547 {
1548 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1549 	u32 ctrl1, cfgcr1, cfgcr2;
1550 	struct skl_wrpll_params wrpll_params = { 0, };
1551 
1552 	/*
1553 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1554 	 * as the DPLL id in this function.
1555 	 */
1556 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1557 
1558 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1559 
1560 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1561 				     i915->dpll.ref_clks.nssc,
1562 				     &wrpll_params))
1563 		return false;
1564 
1565 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1566 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1567 		wrpll_params.dco_integer;
1568 
1569 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1570 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1571 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1572 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1573 		wrpll_params.central_freq;
1574 
1575 	memset(&crtc_state->dpll_hw_state, 0,
1576 	       sizeof(crtc_state->dpll_hw_state));
1577 
1578 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1579 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1580 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1581 	return true;
1582 }
1583 
/*
 * Decode the cached CFGCR1/2 values back into the port clock (kHz):
 * reconstruct the DCO frequency from the integer/fractional ratio and
 * divide by the P0*P1*P2 divider and the 5x AFE factor. Returns 0 on an
 * invalid divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* Q divider only applies when qdiv mode is enabled. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/2^15) * ref clock. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	/* p1 may still be 0 if the qdiv ratio field read back as 0. */
	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1654 
1655 static bool
1656 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1657 {
1658 	u32 ctrl1;
1659 
1660 	/*
1661 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1662 	 * as the DPLL id in this function.
1663 	 */
1664 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1665 	switch (crtc_state->port_clock / 2) {
1666 	case 81000:
1667 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1668 		break;
1669 	case 135000:
1670 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1671 		break;
1672 	case 270000:
1673 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1674 		break;
1675 		/* eDP 1.4 rates */
1676 	case 162000:
1677 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1678 		break;
1679 	case 108000:
1680 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1681 		break;
1682 	case 216000:
1683 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1684 		break;
1685 	}
1686 
1687 	memset(&crtc_state->dpll_hw_state, 0,
1688 	       sizeof(crtc_state->dpll_hw_state));
1689 
1690 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1691 
1692 	return true;
1693 }
1694 
1695 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1696 				  const struct intel_shared_dpll *pll,
1697 				  const struct intel_dpll_hw_state *pll_state)
1698 {
1699 	int link_clock = 0;
1700 
1701 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1702 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1703 	case DPLL_CTRL1_LINK_RATE_810:
1704 		link_clock = 81000;
1705 		break;
1706 	case DPLL_CTRL1_LINK_RATE_1080:
1707 		link_clock = 108000;
1708 		break;
1709 	case DPLL_CTRL1_LINK_RATE_1350:
1710 		link_clock = 135000;
1711 		break;
1712 	case DPLL_CTRL1_LINK_RATE_1620:
1713 		link_clock = 162000;
1714 		break;
1715 	case DPLL_CTRL1_LINK_RATE_2160:
1716 		link_clock = 216000;
1717 		break;
1718 	case DPLL_CTRL1_LINK_RATE_2700:
1719 		link_clock = 270000;
1720 		break;
1721 	default:
1722 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1723 		break;
1724 	}
1725 
1726 	return link_clock * 2;
1727 }
1728 
1729 static bool skl_get_dpll(struct intel_atomic_state *state,
1730 			 struct intel_crtc *crtc,
1731 			 struct intel_encoder *encoder)
1732 {
1733 	struct intel_crtc_state *crtc_state =
1734 		intel_atomic_get_new_crtc_state(state, crtc);
1735 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1736 	struct intel_shared_dpll *pll;
1737 	bool bret;
1738 
1739 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1740 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1741 		if (!bret) {
1742 			drm_dbg_kms(&i915->drm,
1743 				    "Could not get HDMI pll dividers.\n");
1744 			return false;
1745 		}
1746 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1747 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1748 		if (!bret) {
1749 			drm_dbg_kms(&i915->drm,
1750 				    "Could not set DP dpll HW state.\n");
1751 			return false;
1752 		}
1753 	} else {
1754 		return false;
1755 	}
1756 
1757 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1758 		pll = intel_find_shared_dpll(state, crtc,
1759 					     &crtc_state->dpll_hw_state,
1760 					     BIT(DPLL_ID_SKL_DPLL0));
1761 	else
1762 		pll = intel_find_shared_dpll(state, crtc,
1763 					     &crtc_state->dpll_hw_state,
1764 					     BIT(DPLL_ID_SKL_DPLL3) |
1765 					     BIT(DPLL_ID_SKL_DPLL2) |
1766 					     BIT(DPLL_ID_SKL_DPLL1));
1767 	if (!pll)
1768 		return false;
1769 
1770 	intel_reference_shared_dpll(state, crtc,
1771 				    pll, &crtc_state->dpll_hw_state);
1772 
1773 	crtc_state->shared_dpll = pll;
1774 
1775 	return true;
1776 }
1777 
1778 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1779 				const struct intel_shared_dpll *pll,
1780 				const struct intel_dpll_hw_state *pll_state)
1781 {
1782 	/*
1783 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1784 	 * the internal shift for each field
1785 	 */
1786 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1787 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1788 	else
1789 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1790 }
1791 
/* SKL DPLLs use the CDCLK reference clock; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1797 
/* Log the cached SKL PLL state (ctrl1 and CFGCR values). */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1807 
/* PLL vtable for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1814 
/* PLL vtable for SKL DPLL0, which also drives CDCLK and is always on. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1821 
/* SKL PLL inventory: always-on DPLL0 plus three on-demand DPLLs. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1829 
/* Platform hooks tying the SKL PLLs into the shared-DPLL framework. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1837 
/*
 * Power up and program a BXT/GLK port PHY PLL from the cached hw state,
 * then enable it and wait for lock. The register write order below
 * follows the hardware programming sequence and must not be rearranged.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK requires the PLL power to be enabled before programming. */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1958 
/*
 * Disable a BXT/GLK port PHY PLL; on GLK additionally drop PLL power
 * and wait for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1981 
/*
 * Read back a BXT/GLK port PHY PLL's state into @hw_state, masking each
 * register to just the bits the enable path programs. Returns false if
 * display power is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Registers are only accessible with the display powered up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2061 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;	/* port clock in kHz this entry applies to */
	u32 p1;		/* post divider 1 */
	u32 p2;		/* post divider 2 */
	u32 m2_int;	/* integer part of the M2 feedback divider */
	u32 m2_frac;	/* 22-bit fractional part of M2 */
	bool m2_frac_en;	/* whether the fractional M2 part is used */
	u32 n;		/* reference (N) divider */

	int vco;	/* resulting VCO frequency in kHz */
};
2074 
2075 /* pre-calculated values for DP linkrates */
2076 static const struct bxt_clk_div bxt_dp_clk_val[] = {
2077 	{162000, 4, 2, 32, 1677722, 1, 1},
2078 	{270000, 4, 1, 27,       0, 0, 1},
2079 	{540000, 2, 1, 27,       0, 0, 1},
2080 	{216000, 3, 2, 32, 1677722, 1, 1},
2081 	{243000, 4, 1, 24, 1258291, 1, 1},
2082 	{324000, 4, 1, 32, 1677722, 1, 1},
2083 	{432000, 3, 1, 32, 1677722, 1, 1}
2084 };
2085 
2086 static bool
2087 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2088 			  struct bxt_clk_div *clk_div)
2089 {
2090 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2091 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2092 	struct dpll best_clock;
2093 
2094 	/* Calculate HDMI div */
2095 	/*
2096 	 * FIXME: tie the following calculation into
2097 	 * i9xx_crtc_compute_clock
2098 	 */
2099 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2100 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2101 			crtc_state->port_clock,
2102 			pipe_name(crtc->pipe));
2103 		return false;
2104 	}
2105 
2106 	clk_div->p1 = best_clock.p1;
2107 	clk_div->p2 = best_clock.p2;
2108 	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2109 	clk_div->n = best_clock.n;
2110 	clk_div->m2_int = best_clock.m2 >> 22;
2111 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2112 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2113 
2114 	clk_div->vco = best_clock.vco;
2115 
2116 	return true;
2117 }
2118 
2119 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2120 				    struct bxt_clk_div *clk_div)
2121 {
2122 	int clock = crtc_state->port_clock;
2123 	int i;
2124 
2125 	*clk_div = bxt_dp_clk_val[0];
2126 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2127 		if (bxt_dp_clk_val[i].clock == clock) {
2128 			*clk_div = bxt_dp_clk_val[i];
2129 			break;
2130 		}
2131 	}
2132 
2133 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2134 }
2135 
/*
 * Translate the computed dividers into the PLL register state stored in
 * crtc_state->dpll_hw_state. Returns false if the VCO is outside the
 * supported bands.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Loop-filter coefficients depend on the VCO band (values in kHz). */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger delay scales with the port clock (kHz thresholds). */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* Dividers: P1/P2 in ebb0, M2 int in pll0, N in pll1, M2 frac in pll2/pll3. */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	/* Loop filter and lock settings chosen above. */
	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2205 
2206 static bool
2207 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2208 {
2209 	struct bxt_clk_div clk_div = {};
2210 
2211 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2212 
2213 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2214 }
2215 
2216 static bool
2217 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2218 {
2219 	struct bxt_clk_div clk_div = {};
2220 
2221 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2222 
2223 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2224 }
2225 
/* Recover the port clock (kHz) from the saved BXT PLL register state. */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	/* M1 is fixed at 2 on this PHY (see bxt_ddi_hdmi_pll_dividers()). */
	clock.m1 = 2;
	/* Reassemble M2: integer part from pll0, 22-bit fraction from pll2. */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2242 
/*
 * Reserve the PLL for @crtc. BXT has a fixed 1:1 port->PLL mapping, so
 * no PLL selection is needed; only the hw state computation can fail.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	/* Compute the PLL register state for the attached output type. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	/* Record this CRTC as a user of the PLL in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2275 
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* 100 MHz reference for both the SSC and non-SSC cases */
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2282 
2283 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2284 			      const struct intel_dpll_hw_state *hw_state)
2285 {
2286 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2287 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2288 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2289 		    hw_state->ebb0,
2290 		    hw_state->ebb4,
2291 		    hw_state->pll0,
2292 		    hw_state->pll1,
2293 		    hw_state->pll2,
2294 		    hw_state->pll3,
2295 		    hw_state->pll6,
2296 		    hw_state->pll8,
2297 		    hw_state->pll9,
2298 		    hw_state->pll10,
2299 		    hw_state->pcsdw12);
2300 }
2301 
/* Hardware access hooks for the BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2308 
/* One PLL per port (A/B/C); the IDs reuse the SKL DPLL numbering. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },	/* sentinel */
};
2315 
/* BXT display PLL manager: fixed per-port PLLs (see bxt_get_dpll()). */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2323 
/*
 * Enable a CNL DPLL; the numbered comments follow the documented
 * power-up sequence. DVFS steps are handled by the cdclk code.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* 3b. Configure DPLL_CFGCR1 (HDMI dividers) */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2391 
/*
 * Disable a CNL DPLL; the numbered comments follow the documented
 * power-down sequence. Clock ungating and DVFS are handled elsewhere,
 * as noted inline.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2440 
/*
 * Read back the current CFGCR0/CFGCR1 state of a CNL DPLL. Returns
 * false if the PLL is disabled or the display power domain is down.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	/* Only read registers while the power domain is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2476 
/*
 * Decompose the overall divider into P (pdiv), Q (qdiv) and K (kdiv)
 * factors such that pdiv * qdiv * kdiv == bestdiv. The outputs are left
 * untouched for dividers that are not in the solver's candidate list.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3/5/7 map directly, 9/15/21 use kdiv == 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers — the order of the modulo checks matters */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2515 
/*
 * Convert the raw P/Q/K divider values and the target DCO frequency into
 * the register-field encodings stored in @params. @dco_freq and @ref_freq
 * must be in the same units.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* kdiv register encoding: divide-by 1/2/3 -> field value 1/2/4 */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* pdiv register encoding: divide-by 2/3/5/7 -> field value 1/2/4/8 */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv ratios other than 1 are only produced together with kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO as a fixed-point multiple of ref_freq, 15 fractional bits */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2563 
2564 static bool
2565 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2566 			  struct skl_wrpll_params *wrpll_params,
2567 			  int ref_clock)
2568 {
2569 	u32 afe_clock = crtc_state->port_clock * 5;
2570 	u32 dco_min = 7998000;
2571 	u32 dco_max = 10000000;
2572 	u32 dco_mid = (dco_min + dco_max) / 2;
2573 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2574 					 18, 20, 24, 28, 30, 32,  36,  40,
2575 					 42, 44, 48, 50, 52, 54,  56,  60,
2576 					 64, 66, 68, 70, 72, 76,  78,  80,
2577 					 84, 88, 90, 92, 96, 98, 100, 102,
2578 					  3,  5,  7,  9, 15, 21 };
2579 	u32 dco, best_dco = 0, dco_centrality = 0;
2580 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2581 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2582 
2583 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2584 		dco = afe_clock * dividers[d];
2585 
2586 		if ((dco <= dco_max) && (dco >= dco_min)) {
2587 			dco_centrality = abs(dco - dco_mid);
2588 
2589 			if (dco_centrality < best_dco_centrality) {
2590 				best_dco_centrality = dco_centrality;
2591 				best_div = dividers[d];
2592 				best_dco = dco;
2593 			}
2594 		}
2595 	}
2596 
2597 	if (best_div == 0)
2598 		return false;
2599 
2600 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2601 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2602 				  pdiv, qdiv, kdiv);
2603 
2604 	return true;
2605 }
2606 
2607 static bool
2608 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2609 			struct skl_wrpll_params *wrpll_params)
2610 {
2611 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2612 
2613 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2614 					 i915->dpll.ref_clks.nssc);
2615 }
2616 
2617 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2618 {
2619 	u32 cfgcr0, cfgcr1;
2620 	struct skl_wrpll_params wrpll_params = { 0, };
2621 
2622 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2623 
2624 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2625 		return false;
2626 
2627 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2628 		wrpll_params.dco_integer;
2629 
2630 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2631 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2632 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2633 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2634 		DPLL_CFGCR1_CENTRAL_FREQ;
2635 
2636 	memset(&crtc_state->dpll_hw_state, 0,
2637 	       sizeof(crtc_state->dpll_hw_state));
2638 
2639 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2640 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2641 	return true;
2642 }
2643 
2644 /*
2645  * Display WA #22010492432: ehl, tgl
2646  * Program half of the nominal DCO divider fraction value.
2647  */
2648 static bool
2649 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2650 {
2651 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2652 		 IS_JSL_EHL_REVID(i915, EHL_REVID_B0, REVID_FOREVER)) ||
2653 		 IS_TIGERLAKE(i915)) &&
2654 		 i915->dpll.ref_clks.nssc == 38400;
2655 }
2656 
/*
 * Compute the port clock of a combo-PHY WRPLL from its CFGCR0/CFGCR1
 * state and the given reference clock. Inverse of the divider
 * programming in cnl_ddi_hdmi_pll_dividers().
 */
static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state,
				    int ref_clock)
{
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* qdiv only contributes when qdiv mode is enabled */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;


	/* decode the pdiv register field back to the actual divider */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	/* decode the kdiv register field back to the actual divider */
	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* undo the halved fraction programmed for WA #22010492432 */
	if (ehl_combo_pll_div_frac_wa_needed(dev_priv))
		dco_fraction *= 2;

	/* fraction is a 15-bit fixed-point multiple of ref_clock */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	/* unrecognized register encodings would divide by zero below */
	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock = 5 * port clock, hence the final factor of 5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2718 
2719 static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
2720 				  const struct intel_shared_dpll *pll,
2721 				  const struct intel_dpll_hw_state *pll_state)
2722 {
2723 	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
2724 					i915->dpll.ref_clks.nssc);
2725 }
2726 
2727 static bool
2728 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2729 {
2730 	u32 cfgcr0;
2731 
2732 	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2733 
2734 	switch (crtc_state->port_clock / 2) {
2735 	case 81000:
2736 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2737 		break;
2738 	case 135000:
2739 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2740 		break;
2741 	case 270000:
2742 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2743 		break;
2744 		/* eDP 1.4 rates */
2745 	case 162000:
2746 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2747 		break;
2748 	case 108000:
2749 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2750 		break;
2751 	case 216000:
2752 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2753 		break;
2754 	case 324000:
2755 		/* Some SKUs may require elevated I/O voltage to support this */
2756 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2757 		break;
2758 	case 405000:
2759 		/* Some SKUs may require elevated I/O voltage to support this */
2760 		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2761 		break;
2762 	}
2763 
2764 	memset(&crtc_state->dpll_hw_state, 0,
2765 	       sizeof(crtc_state->dpll_hw_state));
2766 
2767 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2768 
2769 	return true;
2770 }
2771 
/*
 * Map the CFGCR0 link-rate field of a DP (LC) PLL back to the port
 * clock in kHz; rates are stored as port_clock / 2, hence the * 2.
 */
static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch (pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
	case DPLL_CFGCR0_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2700:
		link_clock = 270000;
		break;
	case DPLL_CFGCR0_LINK_RATE_3240:
		link_clock = 324000;
		break;
	case DPLL_CFGCR0_LINK_RATE_4050:
		link_clock = 405000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
2810 
/*
 * Compute the PLL state for @crtc's output type, then find (or share) a
 * PLL from the DPLL0-2 pool that matches it and reserve it in the
 * atomic state.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		drm_dbg_kms(&i915->drm,
			    "Skip DPLL setup for output_types 0x%x\n",
			    crtc_state->output_types);
		return false;
	}

	/* Any of DPLL0-2 may serve any port; pick a matching/free one. */
	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		drm_dbg_kms(&i915->drm, "No PLL selected\n");
		return false;
	}

	/* Record this CRTC as a user of the PLL in the atomic state. */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2859 
2860 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2861 				const struct intel_shared_dpll *pll,
2862 				const struct intel_dpll_hw_state *pll_state)
2863 {
2864 	if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2865 		return cnl_ddi_wrpll_get_freq(i915, pll, pll_state);
2866 	else
2867 		return cnl_ddi_lcpll_get_freq(i915, pll, pll_state);
2868 }
2869 
static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC reference */
	/* The DPLL reference follows the cdclk reference clock. */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
2875 
/* Dump the saved CNL PLL register values for debugging. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2884 
/* Hardware access hooks for the CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
	.get_freq = cnl_ddi_pll_get_freq,
};
2891 
/* The shareable CNL DPLL pool (see cnl_get_dpll()). */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },	/* sentinel */
};
2898 
/* CNL display PLL manager: pooled, shareable DPLLs. */
static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = cnl_update_dpll_ref_clks,
	.dump_hw_state = cnl_dump_hw_state,
};
2906 
/* Pre-computed combo PLL dividers, keyed by port clock (kHz). */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2911 
2912 /*
2913  * These values alrea already adjusted: they're the bits we write to the
2914  * registers, not the logical values.
2915  */
2916 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2917 	{ 540000,
2918 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
2919 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2920 	{ 270000,
2921 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
2922 	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2923 	{ 162000,
2924 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
2925 	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2926 	{ 324000,
2927 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
2928 	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2929 	{ 216000,
2930 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
2931 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2932 	{ 432000,
2933 	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
2934 	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2935 	{ 648000,
2936 	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
2937 	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2938 	{ 810000,
2939 	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
2940 	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2941 };
2942 
2943 
/* Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2971 
/* TBT PLL parameters, 24 MHz reference (pre-DISPLAY_VER 12). */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters, 19.2/38.4 MHz reference (pre-DISPLAY_VER 12). */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters, 19.2/38.4 MHz reference (DISPLAY_VER >= 12). */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TBT PLL parameters, 24 MHz reference (DISPLAY_VER >= 12). */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
};
2992 
2993 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2994 				  struct skl_wrpll_params *pll_params)
2995 {
2996 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2997 	const struct icl_combo_pll_params *params =
2998 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2999 		icl_dp_combo_pll_24MHz_values :
3000 		icl_dp_combo_pll_19_2MHz_values;
3001 	int clock = crtc_state->port_clock;
3002 	int i;
3003 
3004 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
3005 		if (clock == params[i].clock) {
3006 			*pll_params = params[i].wrpll;
3007 			return true;
3008 		}
3009 	}
3010 
3011 	MISSING_CASE(clock);
3012 	return false;
3013 }
3014 
3015 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
3016 			     struct skl_wrpll_params *pll_params)
3017 {
3018 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3019 
3020 	if (DISPLAY_VER(dev_priv) >= 12) {
3021 		switch (dev_priv->dpll.ref_clks.nssc) {
3022 		default:
3023 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3024 			fallthrough;
3025 		case 19200:
3026 		case 38400:
3027 			*pll_params = tgl_tbt_pll_19_2MHz_values;
3028 			break;
3029 		case 24000:
3030 			*pll_params = tgl_tbt_pll_24MHz_values;
3031 			break;
3032 		}
3033 	} else {
3034 		switch (dev_priv->dpll.ref_clks.nssc) {
3035 		default:
3036 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3037 			fallthrough;
3038 		case 19200:
3039 		case 38400:
3040 			*pll_params = icl_tbt_pll_19_2MHz_values;
3041 			break;
3042 		case 24000:
3043 			*pll_params = icl_tbt_pll_24MHz_values;
3044 			break;
3045 		}
3046 	}
3047 
3048 	return true;
3049 }
3050 
/*
 * A single port frequency cannot be derived from the TBT PLL state, so any
 * call here is a driver bug; warn and return 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
3063 
3064 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3065 {
3066 	int ref_clock = i915->dpll.ref_clks.nssc;
3067 
3068 	/*
3069 	 * For ICL+, the spec states: if reference frequency is 38.4,
3070 	 * use 19.2 because the DPLL automatically divides that by 2.
3071 	 */
3072 	if (ref_clock == 38400)
3073 		ref_clock = 19200;
3074 
3075 	return ref_clock;
3076 }
3077 
/*
 * Thin wrapper: compute WRPLL parameters for the CRTC using the shared CNL
 * algorithm with the ICL-adjusted reference clock.
 */
static bool
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
					 icl_wrpll_ref_clock(i915));
}
3087 
/*
 * Thin wrapper: decode the combo PLL output frequency from its HW state
 * using the shared CNL algorithm with the ICL-adjusted reference clock.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
					icl_wrpll_ref_clock(i915));
}
3095 
3096 static void icl_calc_dpll_state(struct drm_i915_private *i915,
3097 				const struct skl_wrpll_params *pll_params,
3098 				struct intel_dpll_hw_state *pll_state)
3099 {
3100 	u32 dco_fraction = pll_params->dco_fraction;
3101 
3102 	memset(pll_state, 0, sizeof(*pll_state));
3103 
3104 	if (ehl_combo_pll_div_frac_wa_needed(i915))
3105 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
3106 
3107 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
3108 			    pll_params->dco_integer;
3109 
3110 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3111 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3112 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3113 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
3114 
3115 	if (DISPLAY_VER(i915) >= 12)
3116 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3117 	else
3118 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
3119 }
3120 
3121 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
3122 {
3123 	return id - DPLL_ID_ICL_MGPLL1;
3124 }
3125 
3126 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
3127 {
3128 	return tc_port + DPLL_ID_ICL_MGPLL1;
3129 }
3130 
3131 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3132 				     u32 *target_dco_khz,
3133 				     struct intel_dpll_hw_state *state,
3134 				     bool is_dkl)
3135 {
3136 	u32 dco_min_freq, dco_max_freq;
3137 	int div1_vals[] = {7, 5, 3, 2};
3138 	unsigned int i;
3139 	int div2;
3140 
3141 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3142 	dco_max_freq = is_dp ? 8100000 : 10000000;
3143 
3144 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3145 		int div1 = div1_vals[i];
3146 
3147 		for (div2 = 10; div2 > 0; div2--) {
3148 			int dco = div1 * div2 * clock_khz * 5;
3149 			int a_divratio, tlinedrv, inputsel;
3150 			u32 hsdiv;
3151 
3152 			if (dco < dco_min_freq || dco > dco_max_freq)
3153 				continue;
3154 
3155 			if (div2 >= 2) {
3156 				/*
3157 				 * Note: a_divratio not matching TGL BSpec
3158 				 * algorithm but matching hardcoded values and
3159 				 * working on HW for DP alt-mode at least
3160 				 */
3161 				a_divratio = is_dp ? 10 : 5;
3162 				tlinedrv = is_dkl ? 1 : 2;
3163 			} else {
3164 				a_divratio = 5;
3165 				tlinedrv = 0;
3166 			}
3167 			inputsel = is_dp ? 0 : 1;
3168 
3169 			switch (div1) {
3170 			default:
3171 				MISSING_CASE(div1);
3172 				fallthrough;
3173 			case 2:
3174 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3175 				break;
3176 			case 3:
3177 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3178 				break;
3179 			case 5:
3180 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3181 				break;
3182 			case 7:
3183 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3184 				break;
3185 			}
3186 
3187 			*target_dco_khz = dco;
3188 
3189 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3190 
3191 			state->mg_clktop2_coreclkctl1 =
3192 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3193 
3194 			state->mg_clktop2_hsclkctl =
3195 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3196 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3197 				hsdiv |
3198 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3199 
3200 			return true;
3201 		}
3202 	}
3203 
3204 	return false;
3205 }
3206 
3207 /*
3208  * The specification for this function uses real numbers, so the math had to be
3209  * adapted to integer-only calculation, that's why it looks so different.
3210  */
3211 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3212 				  struct intel_dpll_hw_state *pll_state)
3213 {
3214 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3215 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3216 	int clock = crtc_state->port_clock;
3217 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3218 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3219 	u32 prop_coeff, int_coeff;
3220 	u32 tdc_targetcnt, feedfwgain;
3221 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3222 	u64 tmp;
3223 	bool use_ssc = false;
3224 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3225 	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
3226 
3227 	memset(pll_state, 0, sizeof(*pll_state));
3228 
3229 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3230 				      pll_state, is_dkl)) {
3231 		drm_dbg_kms(&dev_priv->drm,
3232 			    "Failed to find divisors for clock %d\n", clock);
3233 		return false;
3234 	}
3235 
3236 	m1div = 2;
3237 	m2div_int = dco_khz / (refclk_khz * m1div);
3238 	if (m2div_int > 255) {
3239 		if (!is_dkl) {
3240 			m1div = 4;
3241 			m2div_int = dco_khz / (refclk_khz * m1div);
3242 		}
3243 
3244 		if (m2div_int > 255) {
3245 			drm_dbg_kms(&dev_priv->drm,
3246 				    "Failed to find mdiv for clock %d\n",
3247 				    clock);
3248 			return false;
3249 		}
3250 	}
3251 	m2div_rem = dco_khz % (refclk_khz * m1div);
3252 
3253 	tmp = (u64)m2div_rem * (1 << 22);
3254 	do_div(tmp, refclk_khz * m1div);
3255 	m2div_frac = tmp;
3256 
3257 	switch (refclk_khz) {
3258 	case 19200:
3259 		iref_ndiv = 1;
3260 		iref_trim = 28;
3261 		iref_pulse_w = 1;
3262 		break;
3263 	case 24000:
3264 		iref_ndiv = 1;
3265 		iref_trim = 25;
3266 		iref_pulse_w = 2;
3267 		break;
3268 	case 38400:
3269 		iref_ndiv = 2;
3270 		iref_trim = 28;
3271 		iref_pulse_w = 1;
3272 		break;
3273 	default:
3274 		MISSING_CASE(refclk_khz);
3275 		return false;
3276 	}
3277 
3278 	/*
3279 	 * tdc_res = 0.000003
3280 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3281 	 *
3282 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3283 	 * was supposed to be a division, but we rearranged the operations of
3284 	 * the formula to avoid early divisions so we don't multiply the
3285 	 * rounding errors.
3286 	 *
3287 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3288 	 * we also rearrange to work with integers.
3289 	 *
3290 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3291 	 * last division by 10.
3292 	 */
3293 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3294 
3295 	/*
3296 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3297 	 * 32 bits. That's not a problem since we round the division down
3298 	 * anyway.
3299 	 */
3300 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3301 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3302 
3303 	if (dco_khz >= 9000000) {
3304 		prop_coeff = 5;
3305 		int_coeff = 10;
3306 	} else {
3307 		prop_coeff = 4;
3308 		int_coeff = 8;
3309 	}
3310 
3311 	if (use_ssc) {
3312 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3313 		do_div(tmp, refclk_khz * m1div * 10000);
3314 		ssc_stepsize = tmp;
3315 
3316 		tmp = mul_u32_u32(dco_khz, 1000);
3317 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3318 	} else {
3319 		ssc_stepsize = 0;
3320 		ssc_steplen = 0;
3321 	}
3322 	ssc_steplog = 4;
3323 
3324 	/* write pll_state calculations */
3325 	if (is_dkl) {
3326 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3327 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3328 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3329 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3330 
3331 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3332 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3333 
3334 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3335 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3336 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3337 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3338 
3339 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3340 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3341 
3342 		pll_state->mg_pll_tdc_coldst_bias =
3343 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3344 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3345 
3346 	} else {
3347 		pll_state->mg_pll_div0 =
3348 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3349 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3350 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3351 
3352 		pll_state->mg_pll_div1 =
3353 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3354 			MG_PLL_DIV1_DITHER_DIV_2 |
3355 			MG_PLL_DIV1_NDIVRATIO(1) |
3356 			MG_PLL_DIV1_FBPREDIV(m1div);
3357 
3358 		pll_state->mg_pll_lf =
3359 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3360 			MG_PLL_LF_AFCCNTSEL_512 |
3361 			MG_PLL_LF_GAINCTRL(1) |
3362 			MG_PLL_LF_INT_COEFF(int_coeff) |
3363 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3364 
3365 		pll_state->mg_pll_frac_lock =
3366 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3367 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3368 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3369 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3370 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3371 		if (use_ssc || m2div_rem > 0)
3372 			pll_state->mg_pll_frac_lock |=
3373 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3374 
3375 		pll_state->mg_pll_ssc =
3376 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3377 			MG_PLL_SSC_TYPE(2) |
3378 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3379 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3380 			MG_PLL_SSC_FLLEN |
3381 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3382 
3383 		pll_state->mg_pll_tdc_coldst_bias =
3384 			MG_PLL_TDC_COLDST_COLDSTART |
3385 			MG_PLL_TDC_COLDST_IREFINT_EN |
3386 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3387 			MG_PLL_TDC_TDCOVCCORR_EN |
3388 			MG_PLL_TDC_TDCSEL(3);
3389 
3390 		pll_state->mg_pll_bias =
3391 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3392 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3393 			MG_PLL_BIAS_BIAS_BONUS(10) |
3394 			MG_PLL_BIAS_BIASCAL_EN |
3395 			MG_PLL_BIAS_CTRIM(12) |
3396 			MG_PLL_BIAS_VREF_RDAC(4) |
3397 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3398 
3399 		if (refclk_khz == 38400) {
3400 			pll_state->mg_pll_tdc_coldst_bias_mask =
3401 				MG_PLL_TDC_COLDST_COLDSTART;
3402 			pll_state->mg_pll_bias_mask = 0;
3403 		} else {
3404 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3405 			pll_state->mg_pll_bias_mask = -1U;
3406 		}
3407 
3408 		pll_state->mg_pll_tdc_coldst_bias &=
3409 			pll_state->mg_pll_tdc_coldst_bias_mask;
3410 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3411 	}
3412 
3413 	return true;
3414 }
3415 
/*
 * Decode the MG/Dekel PHY PLL output frequency (kHz) from its HW state:
 * freq = ref * m1 * (m2_int + m2_frac/2^22) / (5 * div1 * div2).
 * Returns 0 if the HSDIV ratio field holds an unknown value.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* Dekel (TGL+) and MG (ICL) store the feedback dividers differently. */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Map the HSDIV_RATIO register field back to the divider value. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3487 
3488 /**
3489  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3490  * @crtc_state: state for the CRTC to select the DPLL for
3491  * @port_dpll_id: the active @port_dpll_id to select
3492  *
3493  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3494  * CRTC.
3495  */
3496 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3497 			      enum icl_port_dpll_id port_dpll_id)
3498 {
3499 	struct icl_port_dpll *port_dpll =
3500 		&crtc_state->icl_port_dplls[port_dpll_id];
3501 
3502 	crtc_state->shared_dpll = port_dpll->pll;
3503 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3504 }
3505 
3506 static void icl_update_active_dpll(struct intel_atomic_state *state,
3507 				   struct intel_crtc *crtc,
3508 				   struct intel_encoder *encoder)
3509 {
3510 	struct intel_crtc_state *crtc_state =
3511 		intel_atomic_get_new_crtc_state(state, crtc);
3512 	struct intel_digital_port *primary_port;
3513 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3514 
3515 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3516 		enc_to_mst(encoder)->primary :
3517 		enc_to_dig_port(encoder);
3518 
3519 	if (primary_port &&
3520 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3521 	     primary_port->tc_mode == TC_PORT_LEGACY))
3522 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3523 
3524 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3525 }
3526 
3527 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3528 {
3529 	if (!(i915->hti_state & HDPORT_ENABLED))
3530 		return 0;
3531 
3532 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3533 }
3534 
/*
 * Compute the combo PHY PLL state for the CRTC, pick a free PLL from the
 * platform-specific candidate set and reserve it. Returns false if no
 * state could be computed or no PLL is available.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need a computed WRPLL; DP uses a fixed lookup table. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* Candidate PLL set differs per platform and, on some, per port. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3614 
/*
 * Reserve both PLLs a Type-C port may need: the TBT PLL (default) and the
 * port-specific MG PHY PLL. On any failure after the TBT PLL was taken,
 * the TBT reference is dropped again before returning false.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First: the TBT PLL, shared by all TC ports. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Second: the MG PHY PLL dedicated to this port's TC lane. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Roll back the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3675 
3676 static bool icl_get_dplls(struct intel_atomic_state *state,
3677 			  struct intel_crtc *crtc,
3678 			  struct intel_encoder *encoder)
3679 {
3680 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3681 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3682 
3683 	if (intel_phy_is_combo(dev_priv, phy))
3684 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3685 	else if (intel_phy_is_tc(dev_priv, phy))
3686 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3687 
3688 	MISSING_CASE(phy);
3689 
3690 	return false;
3691 }
3692 
3693 static void icl_put_dplls(struct intel_atomic_state *state,
3694 			  struct intel_crtc *crtc)
3695 {
3696 	const struct intel_crtc_state *old_crtc_state =
3697 		intel_atomic_get_old_crtc_state(state, crtc);
3698 	struct intel_crtc_state *new_crtc_state =
3699 		intel_atomic_get_new_crtc_state(state, crtc);
3700 	enum icl_port_dpll_id id;
3701 
3702 	new_crtc_state->shared_dpll = NULL;
3703 
3704 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3705 		const struct icl_port_dpll *old_port_dpll =
3706 			&old_crtc_state->icl_port_dplls[id];
3707 		struct icl_port_dpll *new_port_dpll =
3708 			&new_crtc_state->icl_port_dplls[id];
3709 
3710 		new_port_dpll->pll = NULL;
3711 
3712 		if (!old_port_dpll->pll)
3713 			continue;
3714 
3715 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3716 	}
3717 }
3718 
/*
 * Read the ICL MG PHY PLL registers into @hw_state, masking out reserved
 * bits so the result is comparable against computed state. Returns false
 * if display power is off or the PLL is disabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * Same mask selection as icl_calc_mg_pll_state(): with a 38.4 MHz
	 * reference only the COLDSTART bit of TDC_COLDST_BIAS is relevant
	 * and BIAS is ignored entirely.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3782 
/*
 * Read the TGL+ Dekel PHY PLL registers into @hw_state, masking out
 * reserved bits. Returns false if display power is off or the PLL is
 * disabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3856 
/*
 * Common readout for combo/TBT PLLs: if @enable_reg shows the PLL enabled,
 * read its CFGCR0/CFGCR1 from the platform-specific register location into
 * @hw_state. Returns false if display power is off or the PLL is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR register offsets differ per platform generation. */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* On JSL/EHL, DPLL4 lives at the index-4 ICL register slot. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3911 
3912 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3913 				   struct intel_shared_dpll *pll,
3914 				   struct intel_dpll_hw_state *hw_state)
3915 {
3916 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3917 
3918 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3919 }
3920 
3921 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3922 				 struct intel_shared_dpll *pll,
3923 				 struct intel_dpll_hw_state *hw_state)
3924 {
3925 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3926 }
3927 
/*
 * Program the PLL's CFGCR0/CFGCR1 from software state into the
 * platform-specific register locations; posting read flushes the writes.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	/* Same register selection logic as icl_pll_get_hw_state(). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* On JSL/EHL, DPLL4 lives at the index-4 ICL register slot. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3961 
/*
 * Program the ICL MG PHY PLL registers from software state. Registers with
 * reserved fields are updated read-modify-write using either fixed masks or
 * the masks computed during icl_calc_mg_pll_state()/readout.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* BIAS/TDC_COLDST_BIAS use the refclk-dependent computed masks. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
4012 
/*
 * Program the DKL PHY PLL registers for a TGL+ Type-C port from the
 * cached hw_state. Called from mg_pll_enable() after the PLL power has
 * been enabled but before the PLL itself is enabled.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush the writes with a posting read. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
4082 
4083 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4084 				 struct intel_shared_dpll *pll,
4085 				 i915_reg_t enable_reg)
4086 {
4087 	u32 val;
4088 
4089 	val = intel_de_read(dev_priv, enable_reg);
4090 	val |= PLL_POWER_ENABLE;
4091 	intel_de_write(dev_priv, enable_reg, val);
4092 
4093 	/*
4094 	 * The spec says we need to "wait" but it also says it should be
4095 	 * immediate.
4096 	 */
4097 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4098 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4099 			pll->info->id);
4100 }
4101 
4102 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4103 			   struct intel_shared_dpll *pll,
4104 			   i915_reg_t enable_reg)
4105 {
4106 	u32 val;
4107 
4108 	val = intel_de_read(dev_priv, enable_reg);
4109 	val |= PLL_ENABLE;
4110 	intel_de_write(dev_priv, enable_reg, val);
4111 
4112 	/* Timeout is actually 600us. */
4113 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4114 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4115 }
4116 
4117 static void combo_pll_enable(struct drm_i915_private *dev_priv,
4118 			     struct intel_shared_dpll *pll)
4119 {
4120 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4121 
4122 	if (IS_JSL_EHL(dev_priv) &&
4123 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4124 
4125 		/*
4126 		 * We need to disable DC states when this DPLL is enabled.
4127 		 * This can be done by taking a reference on DPLL4 power
4128 		 * domain.
4129 		 */
4130 		pll->wakeref = intel_display_power_get(dev_priv,
4131 						       POWER_DOMAIN_DPLL_DC_OFF);
4132 	}
4133 
4134 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4135 
4136 	icl_dpll_write(dev_priv, pll);
4137 
4138 	/*
4139 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4140 	 * paths should already be setting the appropriate voltage, hence we do
4141 	 * nothing here.
4142 	 */
4143 
4144 	icl_pll_enable(dev_priv, pll, enable_reg);
4145 
4146 	/* DVFS post sequence would be here. See the comment above. */
4147 }
4148 
4149 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4150 			   struct intel_shared_dpll *pll)
4151 {
4152 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4153 
4154 	icl_dpll_write(dev_priv, pll);
4155 
4156 	/*
4157 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4158 	 * paths should already be setting the appropriate voltage, hence we do
4159 	 * nothing here.
4160 	 */
4161 
4162 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4163 
4164 	/* DVFS post sequence would be here. See the comment above. */
4165 }
4166 
/*
 * Enable a Type-C port PLL: power it up, program the PHY PLL registers
 * (DKL or MG depending on the display version), then enable the PLL and
 * wait for lock.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Display version 12+ programs the DKL registers, ICL the MG ones. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
4190 
/*
 * Common ICL+ PLL disable sequence: clear PLL_ENABLE and wait for the
 * PLL to unlock, then clear PLL_POWER_ENABLE and wait for the power
 * state to drop.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4227 
4228 static void combo_pll_disable(struct drm_i915_private *dev_priv,
4229 			      struct intel_shared_dpll *pll)
4230 {
4231 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4232 
4233 	icl_pll_disable(dev_priv, pll, enable_reg);
4234 
4235 	if (IS_JSL_EHL(dev_priv) &&
4236 	    pll->info->id == DPLL_ID_EHL_DPLL4)
4237 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4238 					pll->wakeref);
4239 }
4240 
/* Disable the TBT PLL via the common sequence on TBT_PLL_ENABLE. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
4246 
4247 static void mg_pll_disable(struct drm_i915_private *dev_priv,
4248 			   struct intel_shared_dpll *pll)
4249 {
4250 	i915_reg_t enable_reg =
4251 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4252 
4253 	icl_pll_disable(dev_priv, pll, enable_reg);
4254 }
4255 
/* ICL+ DPLLs use the CDCLK reference clock as their non-SSC reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4261 
4262 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4263 			      const struct intel_dpll_hw_state *hw_state)
4264 {
4265 	drm_dbg_kms(&dev_priv->drm,
4266 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4267 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4268 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4269 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4270 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4271 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4272 		    hw_state->cfgcr0, hw_state->cfgcr1,
4273 		    hw_state->mg_refclkin_ctl,
4274 		    hw_state->mg_clktop2_coreclkctl1,
4275 		    hw_state->mg_clktop2_hsclkctl,
4276 		    hw_state->mg_pll_div0,
4277 		    hw_state->mg_pll_div1,
4278 		    hw_state->mg_pll_lf,
4279 		    hw_state->mg_pll_frac_lock,
4280 		    hw_state->mg_pll_ssc,
4281 		    hw_state->mg_pll_bias,
4282 		    hw_state->mg_pll_tdc_coldst_bias);
4283 }
4284 
/* Combo PHY PLL vfuncs (ICL+). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Thunderbolt PLL vfuncs. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* MG PHY (ICL Type-C) PLL vfuncs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4305 
/* Icelake: two combo PLLs, the TBT PLL and four MG (Type-C) PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

/* Icelake DPLL manager. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* Jasper Lake / Elkhart Lake: combo PLLs only. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* JSL/EHL DPLL manager (no .update_active_dpll hook). */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4340 
/* DKL PHY (TGL+ Type-C) PLL vfuncs; enable/disable are shared with MG. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* Tiger Lake: two combo PLLs, the TBT PLL and six TC (DKL) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

/* Tiger Lake DPLL manager. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4369 
/* Rocket Lake: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* Rocket Lake DPLL manager. */
static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* DG1: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

/* DG1 DPLL manager. */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* Alder Lake S: four combo PLLs (reusing ICL and DG1 PLL ids). */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

/* Alder Lake S DPLL manager. */
static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4416 
4417 /**
4418  * intel_shared_dpll_init - Initialize shared DPLLs
4419  * @dev: drm device
4420  *
4421  * Initialize shared DPLLs for @dev.
4422  */
4423 void intel_shared_dpll_init(struct drm_device *dev)
4424 {
4425 	struct drm_i915_private *dev_priv = to_i915(dev);
4426 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4427 	const struct dpll_info *dpll_info;
4428 	int i;
4429 
4430 	if (IS_ALDERLAKE_S(dev_priv))
4431 		dpll_mgr = &adls_pll_mgr;
4432 	else if (IS_DG1(dev_priv))
4433 		dpll_mgr = &dg1_pll_mgr;
4434 	else if (IS_ROCKETLAKE(dev_priv))
4435 		dpll_mgr = &rkl_pll_mgr;
4436 	else if (DISPLAY_VER(dev_priv) >= 12)
4437 		dpll_mgr = &tgl_pll_mgr;
4438 	else if (IS_JSL_EHL(dev_priv))
4439 		dpll_mgr = &ehl_pll_mgr;
4440 	else if (DISPLAY_VER(dev_priv) >= 11)
4441 		dpll_mgr = &icl_pll_mgr;
4442 	else if (IS_CANNONLAKE(dev_priv))
4443 		dpll_mgr = &cnl_pll_mgr;
4444 	else if (IS_GEN9_BC(dev_priv))
4445 		dpll_mgr = &skl_pll_mgr;
4446 	else if (IS_GEN9_LP(dev_priv))
4447 		dpll_mgr = &bxt_pll_mgr;
4448 	else if (HAS_DDI(dev_priv))
4449 		dpll_mgr = &hsw_pll_mgr;
4450 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4451 		dpll_mgr = &pch_pll_mgr;
4452 
4453 	if (!dpll_mgr) {
4454 		dev_priv->dpll.num_shared_dpll = 0;
4455 		return;
4456 	}
4457 
4458 	dpll_info = dpll_mgr->dpll_info;
4459 
4460 	for (i = 0; dpll_info[i].name; i++) {
4461 		drm_WARN_ON(dev, i != dpll_info[i].id);
4462 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4463 	}
4464 
4465 	dev_priv->dpll.mgr = dpll_mgr;
4466 	dev_priv->dpll.num_shared_dpll = i;
4467 	mutex_init(&dev_priv->dpll.lock);
4468 
4469 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4470 }
4471 
4472 /**
4473  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4474  * @state: atomic state
4475  * @crtc: CRTC to reserve DPLLs for
4476  * @encoder: encoder
4477  *
4478  * This function reserves all required DPLLs for the given CRTC and encoder
4479  * combination in the current atomic commit @state and the new @crtc atomic
4480  * state.
4481  *
4482  * The new configuration in the atomic commit @state is made effective by
4483  * calling intel_shared_dpll_swap_state().
4484  *
4485  * The reserved DPLLs should be released by calling
4486  * intel_release_shared_dplls().
4487  *
4488  * Returns:
4489  * True if all required DPLLs were successfully reserved.
4490  */
4491 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4492 				struct intel_crtc *crtc,
4493 				struct intel_encoder *encoder)
4494 {
4495 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4496 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4497 
4498 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4499 		return false;
4500 
4501 	return dpll_mgr->get_dplls(state, crtc, encoder);
4502 }
4503 
4504 /**
4505  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4506  * @state: atomic state
4507  * @crtc: crtc from which the DPLLs are to be released
4508  *
4509  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4510  * from the current atomic commit @state and the old @crtc atomic state.
4511  *
4512  * The new configuration in the atomic commit @state is made effective by
4513  * calling intel_shared_dpll_swap_state().
4514  */
4515 void intel_release_shared_dplls(struct intel_atomic_state *state,
4516 				struct intel_crtc *crtc)
4517 {
4518 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4519 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4520 
4521 	/*
4522 	 * FIXME: this function is called for every platform having a
4523 	 * compute_clock hook, even though the platform doesn't yet support
4524 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4525 	 * called on those.
4526 	 */
4527 	if (!dpll_mgr)
4528 		return;
4529 
4530 	dpll_mgr->put_dplls(state, crtc);
4531 }
4532 
4533 /**
4534  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4535  * @state: atomic state
4536  * @crtc: the CRTC for which to update the active DPLL
4537  * @encoder: encoder determining the type of port DPLL
4538  *
4539  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4540  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4541  * DPLL selected will be based on the current mode of the encoder's port.
4542  */
4543 void intel_update_active_dpll(struct intel_atomic_state *state,
4544 			      struct intel_crtc *crtc,
4545 			      struct intel_encoder *encoder)
4546 {
4547 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4548 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4549 
4550 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4551 		return;
4552 
4553 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4554 }
4555 
4556 /**
4557  * intel_dpll_get_freq - calculate the DPLL's output frequency
4558  * @i915: i915 device
4559  * @pll: DPLL for which to calculate the output frequency
4560  * @pll_state: DPLL state from which to calculate the output frequency
4561  *
4562  * Return the output frequency corresponding to @pll's passed in @pll_state.
4563  */
4564 int intel_dpll_get_freq(struct drm_i915_private *i915,
4565 			const struct intel_shared_dpll *pll,
4566 			const struct intel_dpll_hw_state *pll_state)
4567 {
4568 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4569 		return 0;
4570 
4571 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4572 }
4573 
4574 /**
4575  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4576  * @i915: i915 device
 * @pll: DPLL for which to read out the hardware state
4578  * @hw_state: DPLL's hardware state
4579  *
4580  * Read out @pll's hardware state into @hw_state.
4581  */
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state)
{
	/* Delegate to the platform specific readout vfunc. */
	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
}
4588 
/*
 * Read out the current hardware state of @pll and reconstruct the
 * software tracking (on/off state and pipe usage) from it.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * Mirror combo_pll_enable(): an enabled DPLL4 on JSL/EHL must hold
	 * a DC-off power domain reference, so take it on readout as well.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	/* Rebuild the mask of pipes whose active state uses this PLL. */
	pll->state.pipe_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.pipe_mask |= BIT(crtc->pipe);
	}
	pll->active_mask = pll->state.pipe_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
}
4616 
/* Update the DPLL reference clocks if the platform manager has a hook. */
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
		i915->dpll.mgr->update_ref_clks(i915);
}
4622 
/* Read out the hardware state of every shared DPLL. */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}
4630 
4631 static void sanitize_dpll_state(struct drm_i915_private *i915,
4632 				struct intel_shared_dpll *pll)
4633 {
4634 	if (!pll->on || pll->active_mask)
4635 		return;
4636 
4637 	drm_dbg_kms(&i915->drm,
4638 		    "%s enabled but not in use, disabling\n",
4639 		    pll->info->name);
4640 
4641 	pll->info->funcs->disable(i915, pll);
4642 	pll->on = false;
4643 }
4644 
/* Disable any shared DPLL that is enabled without users. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
}
4652 
4653 /**
4654  * intel_dpll_dump_hw_state - write hw_state to dmesg
4655  * @dev_priv: i915 drm device
4656  * @hw_state: hw state to be written to the log
4657  *
4658  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4659  */
4660 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4661 			      const struct intel_dpll_hw_state *hw_state)
4662 {
4663 	if (dev_priv->dpll.mgr) {
4664 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4665 	} else {
4666 		/* fallback for platforms that don't use the shared dpll
4667 		 * infrastructure
4668 		 */
4669 		drm_dbg_kms(&dev_priv->drm,
4670 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4671 			    "fp0: 0x%x, fp1: 0x%x\n",
4672 			    hw_state->dpll,
4673 			    hw_state->dpll_md,
4674 			    hw_state->fp0,
4675 			    hw_state->fp1);
4676 	}
4677 }
4678