1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
47 
/*
 * Per-platform shared DPLL management: the table of PLLs available on the
 * platform plus the hooks used to reserve/release them during atomic commits.
 */
struct intel_dpll_mgr {
	/* table describing the platform's PLLs, terminated by an empty entry */
	const struct dpll_info *dpll_info;

	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic state */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* optional: switch the active PLL for a port with multiple PLLs */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* optional: refresh the cached PLL reference clock frequencies */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log the decoded contents of @hw_state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
63 
64 static void
65 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
66 				  struct intel_shared_dpll_state *shared_dpll)
67 {
68 	enum intel_dpll_id i;
69 
70 	/* Copy shared dpll state */
71 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
72 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
73 
74 		shared_dpll[i] = pll->state;
75 	}
76 }
77 
78 static struct intel_shared_dpll_state *
79 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
80 {
81 	struct intel_atomic_state *state = to_intel_atomic_state(s);
82 
83 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
84 
85 	if (!state->dpll_set) {
86 		state->dpll_set = true;
87 
88 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
89 						  state->shared_dpll);
90 	}
91 
92 	return state->shared_dpll;
93 }
94 
95 /**
96  * intel_get_shared_dpll_by_id - get a DPLL given its id
97  * @dev_priv: i915 device instance
98  * @id: pll id
99  *
100  * Returns:
101  * A pointer to the DPLL with @id
102  */
103 struct intel_shared_dpll *
104 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
105 			    enum intel_dpll_id id)
106 {
107 	return &dev_priv->dpll.shared_dplls[id];
108 }
109 
110 /**
111  * intel_get_shared_dpll_id - get the id of a DPLL
112  * @dev_priv: i915 device instance
113  * @pll: the DPLL
114  *
115  * Returns:
116  * The id of @pll
117  */
118 enum intel_dpll_id
119 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
120 			 struct intel_shared_dpll *pll)
121 {
122 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
123 
124 	if (drm_WARN_ON(&dev_priv->drm,
125 			pll_idx < 0 ||
126 			pll_idx >= dev_priv->dpll.num_shared_dpll))
127 		return -1;
128 
129 	return pll_idx;
130 }
131 
/* For ILK+ */
/*
 * Warn if @pll's current hardware enable state does not match the expected
 * @state (true = enabled).  Also warns (and bails) if @pll is NULL.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	/* get_hw_state() returns the enabled bit; hw_state itself is unused here */
	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
149 
150 static i915_reg_t
151 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
152 			   struct intel_shared_dpll *pll)
153 {
154 	if (IS_DG1(i915))
155 		return DG1_DPLL_ENABLE(pll->info->id);
156 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
157 		return MG_PLL_ENABLE(0);
158 
159 	return CNL_DPLL_ENABLE(pll->info->id);
160 }
161 
/**
 * intel_prepare_shared_dpll - call a dpll's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared dpll
 *
 * This calls the PLL's prepare hook if it has one and if the PLL is not
 * already enabled. The prepare hook is platform specific.
 */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* The PLL must have been reserved by at least one CRTC by now. */
	drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
	if (!pll->active_mask) {
		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
		drm_WARN_ON(&dev_priv->drm, pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		/* Only prepare while no CRTC is actively using the PLL. */
		pll->info->funcs->prepare(dev_priv, pll);
	}
	mutex_unlock(&dev_priv->dpll.lock);
}
189 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	unsigned int old_mask;

	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll.lock);
	old_mask = pll->active_mask;

	/* The CRTC must have reserved the PLL and not already be active on it. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
		goto out;

	pll->active_mask |= crtc_mask;

	drm_dbg_kms(&dev_priv->drm,
		    "enable %s (active %x, on? %d) for crtc %d\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id);

	/* Another CRTC already turned the hardware on; just sanity check it. */
	if (old_mask) {
		drm_WARN_ON(&dev_priv->drm, !pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	drm_WARN_ON(&dev_priv->drm, pll->on);

	/* First active user: actually enable the PLL hardware. */
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
235 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);

	/* PCH only available on ILK+ */
	if (INTEL_GEN(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll.lock);
	/* The CRTC must currently be counted as an active user of the PLL. */
	if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
		goto out;

	drm_dbg_kms(&dev_priv->drm,
		    "disable %s (active %x, on? %d) for crtc %d\n",
		    pll->info->name, pll->active_mask, pll->on,
		    crtc->base.base.id);

	assert_shared_dpll_enabled(dev_priv, pll);
	drm_WARN_ON(&dev_priv->drm, !pll->on);

	pll->active_mask &= ~crtc_mask;
	/* Keep the hardware running while other CRTCs still use the PLL. */
	if (pll->active_mask)
		goto out;

	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll.lock);
}
279 
280 static struct intel_shared_dpll *
281 intel_find_shared_dpll(struct intel_atomic_state *state,
282 		       const struct intel_crtc *crtc,
283 		       const struct intel_dpll_hw_state *pll_state,
284 		       unsigned long dpll_mask)
285 {
286 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
287 	struct intel_shared_dpll *pll, *unused_pll = NULL;
288 	struct intel_shared_dpll_state *shared_dpll;
289 	enum intel_dpll_id i;
290 
291 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
292 
293 	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
294 
295 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
296 		pll = &dev_priv->dpll.shared_dplls[i];
297 
298 		/* Only want to check enabled timings first */
299 		if (shared_dpll[i].crtc_mask == 0) {
300 			if (!unused_pll)
301 				unused_pll = pll;
302 			continue;
303 		}
304 
305 		if (memcmp(pll_state,
306 			   &shared_dpll[i].hw_state,
307 			   sizeof(*pll_state)) == 0) {
308 			drm_dbg_kms(&dev_priv->drm,
309 				    "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
310 				    crtc->base.base.id, crtc->base.name,
311 				    pll->info->name,
312 				    shared_dpll[i].crtc_mask,
313 				    pll->active_mask);
314 			return pll;
315 		}
316 	}
317 
318 	/* Ok no matching timings, maybe there's a free one? */
319 	if (unused_pll) {
320 		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
321 			    crtc->base.base.id, crtc->base.name,
322 			    unused_pll->info->name);
323 		return unused_pll;
324 	}
325 
326 	return NULL;
327 }
328 
329 static void
330 intel_reference_shared_dpll(struct intel_atomic_state *state,
331 			    const struct intel_crtc *crtc,
332 			    const struct intel_shared_dpll *pll,
333 			    const struct intel_dpll_hw_state *pll_state)
334 {
335 	struct drm_i915_private *i915 = to_i915(state->base.dev);
336 	struct intel_shared_dpll_state *shared_dpll;
337 	const enum intel_dpll_id id = pll->info->id;
338 
339 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
340 
341 	if (shared_dpll[id].crtc_mask == 0)
342 		shared_dpll[id].hw_state = *pll_state;
343 
344 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
345 		pipe_name(crtc->pipe));
346 
347 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
348 }
349 
350 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
351 					  const struct intel_crtc *crtc,
352 					  const struct intel_shared_dpll *pll)
353 {
354 	struct intel_shared_dpll_state *shared_dpll;
355 
356 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
357 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
358 }
359 
360 static void intel_put_dpll(struct intel_atomic_state *state,
361 			   struct intel_crtc *crtc)
362 {
363 	const struct intel_crtc_state *old_crtc_state =
364 		intel_atomic_get_old_crtc_state(state, crtc);
365 	struct intel_crtc_state *new_crtc_state =
366 		intel_atomic_get_new_crtc_state(state, crtc);
367 
368 	new_crtc_state->shared_dpll = NULL;
369 
370 	if (!old_crtc_state->shared_dpll)
371 		return;
372 
373 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
374 }
375 
376 /**
377  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
378  * @state: atomic state
379  *
380  * This is the dpll version of drm_atomic_helper_swap_state() since the
381  * helper does not handle driver-specific global state.
382  *
383  * For consistency with atomic helpers this function does a complete swap,
384  * i.e. it also puts the current state into @state, even though there is no
385  * need for that at this moment.
386  */
387 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
388 {
389 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
390 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
391 	enum intel_dpll_id i;
392 
393 	if (!state->dpll_set)
394 		return;
395 
396 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
397 		struct intel_shared_dpll *pll =
398 			&dev_priv->dpll.shared_dplls[i];
399 
400 		swap(pll->state, shared_dpll[i]);
401 	}
402 }
403 
/*
 * Read back the PCH DPLL, FP0 and FP1 registers into @hw_state.
 * Returns true if the PLL is enabled (DPLL_VCO_ENABLE set); false if it is
 * disabled or the display core power domain is down.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
426 
/* Program the FP0/FP1 divider registers ahead of enabling the DPLL itself. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
435 
/*
 * Warn if no PCH reference clock source (SSC, non-spread or superspread)
 * is selected in PCH_DREF_CONTROL.  Only meaningful on IBX/CPT PCHs.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	/* Any non-zero source field means some refclk source is active. */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
448 
/*
 * Enable the PCH DPLL.  The DPLL register is written twice: the pixel
 * multiplier field only takes effect once the PLL is enabled and its
 * clocks are stable, hence the second write after the settle delay.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
472 
/* Disable the PCH DPLL by clearing its control register, then let it settle. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	/* Flush the write, then wait for the PLL to wind down. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
482 
483 static bool ibx_get_dpll(struct intel_atomic_state *state,
484 			 struct intel_crtc *crtc,
485 			 struct intel_encoder *encoder)
486 {
487 	struct intel_crtc_state *crtc_state =
488 		intel_atomic_get_new_crtc_state(state, crtc);
489 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
490 	struct intel_shared_dpll *pll;
491 	enum intel_dpll_id i;
492 
493 	if (HAS_PCH_IBX(dev_priv)) {
494 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
495 		i = (enum intel_dpll_id) crtc->pipe;
496 		pll = &dev_priv->dpll.shared_dplls[i];
497 
498 		drm_dbg_kms(&dev_priv->drm,
499 			    "[CRTC:%d:%s] using pre-allocated %s\n",
500 			    crtc->base.base.id, crtc->base.name,
501 			    pll->info->name);
502 	} else {
503 		pll = intel_find_shared_dpll(state, crtc,
504 					     &crtc_state->dpll_hw_state,
505 					     BIT(DPLL_ID_PCH_PLL_B) |
506 					     BIT(DPLL_ID_PCH_PLL_A));
507 	}
508 
509 	if (!pll)
510 		return false;
511 
512 	/* reference the pll */
513 	intel_reference_shared_dpll(state, crtc,
514 				    pll, &crtc_state->dpll_hw_state);
515 
516 	crtc_state->shared_dpll = pll;
517 
518 	return true;
519 }
520 
/* Log the IBX-relevant fields of @hw_state for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
532 
/* Hook table for the IBX/CPT PCH DPLLs */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
539 
/* PCH DPLL descriptors; the empty entry terminates the table */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
545 
/* DPLL manager for IBX/CPT PCH platforms */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
552 
/* Program the WRPLL control register, flush, and let the PLL settle. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}
562 
/* Program the SPLL control register, flush, and let the PLL settle. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}
570 
/*
 * Disable the WRPLL and, if this PLL was flagged as a PCH SSC user,
 * reconfigure the PCH reference clock afterwards.
 */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
588 
/*
 * Disable the SPLL and, if this PLL was flagged as a PCH SSC user,
 * reconfigure the PCH reference clock afterwards.
 */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
606 
/*
 * Read back WRPLL_CTL into @hw_state.  Returns true if the PLL is enabled;
 * false if it is disabled or the display core power domain is down.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
627 
/*
 * Read back SPLL_CTL into @hw_state.  Returns true if the PLL is enabled;
 * false if it is disabled or the display core power domain is down.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Registers are only readable while display power is up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
647 
/* LCPLL frequency (MHz) used as the WRPLL reference */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post-divider (P) search range and step */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* One candidate divider triple: P, N2 (= 2*N) and R2 (= 2*R) */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
664 
/*
 * Return the allowed WRPLL frequency error budget (in ppm, see
 * hsw_wrpll_update_rnp()) for the given clock in Hz.  Exact standard
 * rates get a zero budget, a handful of known rates get a wider one,
 * and everything else defaults to 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	switch (clock) {
	case 25175000:
	case 25200000:
	case 27000000:
	case 27027000:
	case 37762500:
	case 37800000:
	case 40500000:
	case 40541000:
	case 54000000:
	case 54054000:
	case 59341000:
	case 59400000:
	case 72000000:
	case 74176000:
	case 74250000:
	case 81000000:
	case 81081000:
	case 89012000:
	case 89100000:
	case 108000000:
	case 108108000:
	case 111264000:
	case 111375000:
	case 148352000:
	case 148500000:
	case 162000000:
	case 162162000:
	case 222525000:
	case 222750000:
	case 296703000:
	case 297000000:
		return 0;
	case 233500000:
	case 245250000:
	case 247750000:
	case 253250000:
	case 298000000:
		return 1500;
	case 169128000:
	case 169500000:
	case 179500000:
	case 202000000:
		return 2000;
	case 256250000:
	case 262500000:
	case 270000000:
	case 272500000:
	case 273750000:
	case 280750000:
	case 281250000:
	case 286000000:
	case 291750000:
		return 4000;
	case 267250000:
	case 268500000:
		return 5000;
	default:
		return 1000;
	}
}
738 
/*
 * Compare the candidate divider triple (r2, n2, p) against the current
 * @best for the target frequency @freq2k (clock / 100, see
 * hsw_ddi_calculate_wrpll()), and replace @best if the candidate is
 * preferable.  @budget is the allowed error in ppm.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	/* a >= c means the candidate is within budget; b >= d the same for best */
	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
798 
/*
 * Exhaustively search the (R2, N2, P) divider space for the combination
 * that best produces @clock, writing the winners to the out parameters.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
863 
864 static struct intel_shared_dpll *
865 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
866 		       struct intel_crtc *crtc)
867 {
868 	struct intel_crtc_state *crtc_state =
869 		intel_atomic_get_new_crtc_state(state, crtc);
870 	struct intel_shared_dpll *pll;
871 	u32 val;
872 	unsigned int p, n2, r2;
873 
874 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
875 
876 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
877 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
878 	      WRPLL_DIVIDER_POST(p);
879 
880 	crtc_state->dpll_hw_state.wrpll = val;
881 
882 	pll = intel_find_shared_dpll(state, crtc,
883 				     &crtc_state->dpll_hw_state,
884 				     BIT(DPLL_ID_WRPLL2) |
885 				     BIT(DPLL_ID_WRPLL1));
886 
887 	if (!pll)
888 		return NULL;
889 
890 	return pll;
891 }
892 
/*
 * Decode the programmed WRPLL dividers and reference selection and return
 * the resulting port clock in kHz, or 0 for an unknown reference select.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll->state.hw_state.wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
931 
932 static struct intel_shared_dpll *
933 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
934 {
935 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
936 	struct intel_shared_dpll *pll;
937 	enum intel_dpll_id pll_id;
938 	int clock = crtc_state->port_clock;
939 
940 	switch (clock / 2) {
941 	case 81000:
942 		pll_id = DPLL_ID_LCPLL_810;
943 		break;
944 	case 135000:
945 		pll_id = DPLL_ID_LCPLL_1350;
946 		break;
947 	case 270000:
948 		pll_id = DPLL_ID_LCPLL_2700;
949 		break;
950 	default:
951 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
952 			    clock);
953 		return NULL;
954 	}
955 
956 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
957 
958 	if (!pll)
959 		return NULL;
960 
961 	return pll;
962 }
963 
964 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
965 				  const struct intel_shared_dpll *pll)
966 {
967 	int link_clock = 0;
968 
969 	switch (pll->info->id) {
970 	case DPLL_ID_LCPLL_810:
971 		link_clock = 81000;
972 		break;
973 	case DPLL_ID_LCPLL_1350:
974 		link_clock = 135000;
975 		break;
976 	case DPLL_ID_LCPLL_2700:
977 		link_clock = 270000;
978 		break;
979 	default:
980 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
981 		break;
982 	}
983 
984 	return link_clock * 2;
985 }
986 
/*
 * Pick the SPLL for the analog (CRT) output path; only a 270 MHz link
 * clock (port clock 540000) is supported.  Stages the SPLL register
 * value and looks up the single shared SPLL.
 */
static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
		return NULL;

	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
					 SPLL_REF_MUXED_SSC;

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}
1003 
1004 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1005 				 const struct intel_shared_dpll *pll)
1006 {
1007 	int link_clock = 0;
1008 
1009 	switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
1010 	case SPLL_FREQ_810MHz:
1011 		link_clock = 81000;
1012 		break;
1013 	case SPLL_FREQ_1350MHz:
1014 		link_clock = 135000;
1015 		break;
1016 	case SPLL_FREQ_2700MHz:
1017 		link_clock = 270000;
1018 		break;
1019 	default:
1020 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1021 		break;
1022 	}
1023 
1024 	return link_clock * 2;
1025 }
1026 
1027 static bool hsw_get_dpll(struct intel_atomic_state *state,
1028 			 struct intel_crtc *crtc,
1029 			 struct intel_encoder *encoder)
1030 {
1031 	struct intel_crtc_state *crtc_state =
1032 		intel_atomic_get_new_crtc_state(state, crtc);
1033 	struct intel_shared_dpll *pll;
1034 
1035 	memset(&crtc_state->dpll_hw_state, 0,
1036 	       sizeof(crtc_state->dpll_hw_state));
1037 
1038 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1039 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1040 	else if (intel_crtc_has_dp_encoder(crtc_state))
1041 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1042 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1043 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1044 	else
1045 		return false;
1046 
1047 	if (!pll)
1048 		return false;
1049 
1050 	intel_reference_shared_dpll(state, crtc,
1051 				    pll, &crtc_state->dpll_hw_state);
1052 
1053 	crtc_state->shared_dpll = pll;
1054 
1055 	return true;
1056 }
1057 
1058 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1059 {
1060 	i915->dpll.ref_clks.ssc = 135000;
1061 	/* Non-SSC is only used on non-ULT HSW. */
1062 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1063 		i915->dpll.ref_clks.nssc = 24000;
1064 	else
1065 		i915->dpll.ref_clks.nssc = 135000;
1066 }
1067 
1068 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1069 			      const struct intel_dpll_hw_state *hw_state)
1070 {
1071 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1072 		    hw_state->wrpll, hw_state->spll);
1073 }
1074 
/* WRPLL 1/2: fully managed PLLs with HW readout and frequency calc. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

/* SPLL: used for the analog (CRT) output, see hsw_get_dpll(). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1088 
/*
 * The LCPLLs are fixed-frequency and marked INTEL_DPLL_ALWAYS_ON in
 * hsw_plls[], so enabling and disabling are intentional no-ops and the
 * readout hook unconditionally reports the PLL as enabled.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	/* nothing to read out; the LCPLL state is fixed */
	return true;
}

static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1112 
/* Haswell PLL pool: two WRPLLs, the SPLL, and three always-on LCPLLs. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};

/* Top-level shared-DPLL manager hooks for Haswell. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1130 
/* Per-PLL register set on SKL: control + HDMI-mode config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1161 
/*
 * Update this PLL's portion of the shared DPLL_CTRL1 register (HDMI
 * mode, SSC enable and link rate) from the cached hw state, leaving
 * the other PLLs' fields untouched.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* clear this PLL's field, then OR in the cached 6-bit value */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1178 
/*
 * Enable a SKL shared DPLL (1-3): program the CTRL1 link config and
 * HDMI-mode dividers (CFGCR1/2), then set the enable bit and wait for
 * the PLL to report lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* 5 ms timeout on the lock bit in DPLL_STATUS */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1199 
/*
 * DPLL0 is already running (it drives CDCLK, see
 * skl_ddi_dpll0_get_hw_state()); only its CTRL1 link config is
 * (re)programmed here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}

static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}

/* DPLL0 must never be turned off (it drives CDCLK), so this is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1222 
/*
 * Read out the current HW state of a SKL DPLL (1-3) into @hw_state.
 * Returns false when the display power well or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* each PLL owns a 6-bit field in the shared DPLL_CTRL1 register */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1259 
/*
 * Read out DPLL0's state. DPLL0 is expected to always be enabled as it
 * drives CDCLK; only its CTRL1 field is read back (no HDMI-mode
 * CFGCR registers exist for DPLL0, see skl_dpll_regs[]).
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1292 
1293 struct skl_wrpll_context {
1294 	u64 min_deviation;		/* current minimal deviation */
1295 	u64 central_freq;		/* chosen central freq */
1296 	u64 dco_freq;			/* chosen dco freq */
1297 	unsigned int p;			/* chosen divider */
1298 };
1299 
1300 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1301 {
1302 	memset(ctx, 0, sizeof(*ctx));
1303 
1304 	ctx->min_deviation = U64_MAX;
1305 }
1306 
1307 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1308 #define SKL_DCO_MAX_PDEVIATION	100
1309 #define SKL_DCO_MAX_NDEVIATION	600
1310 
1311 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1312 				  u64 central_freq,
1313 				  u64 dco_freq,
1314 				  unsigned int divider)
1315 {
1316 	u64 deviation;
1317 
1318 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1319 			      central_freq);
1320 
1321 	/* positive deviation */
1322 	if (dco_freq >= central_freq) {
1323 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1324 		    deviation < ctx->min_deviation) {
1325 			ctx->min_deviation = deviation;
1326 			ctx->central_freq = central_freq;
1327 			ctx->dco_freq = dco_freq;
1328 			ctx->p = divider;
1329 		}
1330 	/* negative deviation */
1331 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1332 		   deviation < ctx->min_deviation) {
1333 		ctx->min_deviation = deviation;
1334 		ctx->central_freq = central_freq;
1335 		ctx->dco_freq = dco_freq;
1336 		ctx->p = divider;
1337 	}
1338 }
1339 
/*
 * Factor the overall divider @p into the three HW divider stages
 * p0 * p1 * p2. Only the dividers generated by
 * skl_ddi_calculate_wrpll() are handled; an unrecognized @p leaves
 * the outputs untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers: always end up with an even p2 */
		unsigned int half = p / 2;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
		} else if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1388 
/* WRPLL dividers in the encodings used by the DPLL_CFGCR1/2 fields. */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1398 
/*
 * Encode the chosen DCO central frequency and p0/p1/p2 dividers into
 * the register field values expected by DPLL_CFGCR1/2, and split the
 * resulting DCO frequency into integer + 15-bit fractional parts.
 * @afe_clock and @central_freq are in Hz; @ref_clock is in kHz.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* callers only pass one of these three values (no default case) */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* qdiv is bypassed (mode 0) when the ratio is 1 */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* fraction is the remainder scaled to a 15-bit (0x8000) range */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1466 
/*
 * Find WRPLL dividers for the given pixel clock (@clock in Hz): try
 * every (divider, DCO central frequency) pair, keeping the candidate
 * with minimal DCO deviation and preferring even dividers, then encode
 * the winner into @wrpll_params. Returns false if no divider keeps the
 * DCO within its allowed deviation window.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		/* even dividers are tried first and preferred (see below) */
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1540 
1541 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1542 {
1543 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1544 	u32 ctrl1, cfgcr1, cfgcr2;
1545 	struct skl_wrpll_params wrpll_params = { 0, };
1546 
1547 	/*
1548 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1549 	 * as the DPLL id in this function.
1550 	 */
1551 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1552 
1553 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1554 
1555 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1556 				     i915->dpll.ref_clks.nssc,
1557 				     &wrpll_params))
1558 		return false;
1559 
1560 	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1561 		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1562 		wrpll_params.dco_integer;
1563 
1564 	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1565 		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1566 		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1567 		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1568 		wrpll_params.central_freq;
1569 
1570 	memset(&crtc_state->dpll_hw_state, 0,
1571 	       sizeof(crtc_state->dpll_hw_state));
1572 
1573 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1574 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1575 	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1576 	return true;
1577 }
1578 
/*
 * Compute the port clock (kHz) produced by a SKL WRPLL from its cached
 * CFGCR1/2 fields: dco_freq / (p0 * p1 * p2 * 5). Returns 0 on an
 * unrecognized divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* qdiv ratio only applies when qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	/* decode the pdiv field back into the raw divider value */
	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	/* decode the kdiv field back into the raw divider value */
	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* integer part plus 15-bit (0x8000) fraction, in ref_clock units */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
1649 
1650 static bool
1651 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1652 {
1653 	u32 ctrl1;
1654 
1655 	/*
1656 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1657 	 * as the DPLL id in this function.
1658 	 */
1659 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1660 	switch (crtc_state->port_clock / 2) {
1661 	case 81000:
1662 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1663 		break;
1664 	case 135000:
1665 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1666 		break;
1667 	case 270000:
1668 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1669 		break;
1670 		/* eDP 1.4 rates */
1671 	case 162000:
1672 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1673 		break;
1674 	case 108000:
1675 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1676 		break;
1677 	case 216000:
1678 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1679 		break;
1680 	}
1681 
1682 	memset(&crtc_state->dpll_hw_state, 0,
1683 	       sizeof(crtc_state->dpll_hw_state));
1684 
1685 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1686 
1687 	return true;
1688 }
1689 
1690 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1691 				  const struct intel_shared_dpll *pll)
1692 {
1693 	int link_clock = 0;
1694 
1695 	switch ((pll->state.hw_state.ctrl1 &
1696 		 DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1697 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1698 	case DPLL_CTRL1_LINK_RATE_810:
1699 		link_clock = 81000;
1700 		break;
1701 	case DPLL_CTRL1_LINK_RATE_1080:
1702 		link_clock = 108000;
1703 		break;
1704 	case DPLL_CTRL1_LINK_RATE_1350:
1705 		link_clock = 135000;
1706 		break;
1707 	case DPLL_CTRL1_LINK_RATE_1620:
1708 		link_clock = 162000;
1709 		break;
1710 	case DPLL_CTRL1_LINK_RATE_2160:
1711 		link_clock = 216000;
1712 		break;
1713 	case DPLL_CTRL1_LINK_RATE_2700:
1714 		link_clock = 270000;
1715 		break;
1716 	default:
1717 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1718 		break;
1719 	}
1720 
1721 	return link_clock * 2;
1722 }
1723 
/*
 * Compute the DPLL state for the CRTC's output type and reserve a
 * matching shared DPLL: eDP must use DPLL0 (which also drives CDCLK),
 * all other outputs pick from DPLL1-3. Returns false when the state
 * can't be computed or no PLL is available.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		/* unsupported output type */
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1772 
1773 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1774 				const struct intel_shared_dpll *pll)
1775 {
1776 	/*
1777 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1778 	 * the internal shift for each field
1779 	 */
1780 	if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1781 		return skl_ddi_wrpll_get_freq(i915, pll);
1782 	else
1783 		return skl_ddi_lcpll_get_freq(i915, pll);
1784 }
1785 
/* The SKL PLL reference is the CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}

/* Log the cached SKL PLL state: CTRL1 plus the HDMI-mode CFGCR1/2. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1801 
/* DPLL 1-3: fully managed shared PLLs. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* DPLL 0 drives CDCLK and is always on; only CTRL1 is managed. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* Skylake PLL pool: always-on DPLL0 plus three general-purpose DPLLs. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};

/* Top-level shared-DPLL manager hooks for Skylake. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1831 
/*
 * Enable a BXT/GLK port PLL: select the non-SSC reference, (on GLK)
 * power up the PLL, program the dividers and loop coefficients through
 * the DPIO PHY registers, trigger recalibration, set the enable bit and
 * wait for lock, then program the lane staggering config. The register
 * write order follows the cached values in pll->state.hw_state.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK requires an explicit PLL power-up step before programming */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	/* 200 us timeout for the PLL to report lock */
	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1952 
/* Disable a BXT/GLK port PLL and, on GLK only, also power it down. */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		/* 200 us timeout for the power state to clear */
		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1975 
/*
 * Read out the BXT port PLL state from the DPIO PHY registers into
 * @hw_state, masking each value down to the bits programmed in
 * bxt_ddi_pll_enable(). Returns false when the display power well or
 * the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2055 
/*
 * bxt clock parameters
 *
 * Divider set for a BXT/GLK port PLL. The effective m2 feedback divider
 * is m2_int + m2_frac / 2^22; the fractional part is only applied when
 * m2_frac_en is set (see bxt_ddi_set_dpll_hw_state()).
 */
struct bxt_clk_div {
	int clock;	/* port clock (kHz) this entry applies to */
	u32 p1;		/* post divider 1 */
	u32 p2;		/* post divider 2 */
	u32 m2_int;	/* feedback divider, integer part */
	u32 m2_frac;	/* feedback divider, 22 bit fractional part */
	bool m2_frac_en; /* enable the fractional feedback part */
	u32 n;		/* reference divider */

	int vco;	/* resulting VCO frequency (kHz) */
};
2068 
2069 /* pre-calculated values for DP linkrates */
2070 static const struct bxt_clk_div bxt_dp_clk_val[] = {
2071 	{162000, 4, 2, 32, 1677722, 1, 1},
2072 	{270000, 4, 1, 27,       0, 0, 1},
2073 	{540000, 2, 1, 27,       0, 0, 1},
2074 	{216000, 3, 2, 32, 1677722, 1, 1},
2075 	{243000, 4, 1, 24, 1258291, 1, 1},
2076 	{324000, 4, 1, 32, 1677722, 1, 1},
2077 	{432000, 3, 1, 32, 1677722, 1, 1}
2078 };
2079 
2080 static bool
2081 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2082 			  struct bxt_clk_div *clk_div)
2083 {
2084 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2085 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2086 	struct dpll best_clock;
2087 
2088 	/* Calculate HDMI div */
2089 	/*
2090 	 * FIXME: tie the following calculation into
2091 	 * i9xx_crtc_compute_clock
2092 	 */
2093 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2094 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2095 			crtc_state->port_clock,
2096 			pipe_name(crtc->pipe));
2097 		return false;
2098 	}
2099 
2100 	clk_div->p1 = best_clock.p1;
2101 	clk_div->p2 = best_clock.p2;
2102 	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2103 	clk_div->n = best_clock.n;
2104 	clk_div->m2_int = best_clock.m2 >> 22;
2105 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2106 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2107 
2108 	clk_div->vco = best_clock.vco;
2109 
2110 	return true;
2111 }
2112 
2113 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2114 				    struct bxt_clk_div *clk_div)
2115 {
2116 	int clock = crtc_state->port_clock;
2117 	int i;
2118 
2119 	*clk_div = bxt_dp_clk_val[0];
2120 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2121 		if (bxt_dp_clk_val[i].clock == clock) {
2122 			*clk_div = bxt_dp_clk_val[i];
2123 			break;
2124 		}
2125 	}
2126 
2127 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2128 }
2129 
/*
 * Translate the dividers in @clk_div into the BXT port PLL register
 * values in crtc_state->dpll_hw_state. Loop filter coefficients and the
 * lane stagger delay are chosen from the VCO / port clock ranges.
 *
 * Returns false if the VCO frequency is outside the supported ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* loop filter coefficients depend on the VCO frequency (kHz) */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* faster port clocks need a larger lane stagger delay */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* assemble the fields masked back out in bxt_ddi_pll_get_hw_state() */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	/* prop_coef and targ_cnt live at bit 0 of their registers */
	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2199 
/*
 * Fill crtc_state->dpll_hw_state for a DP output from the pre-computed
 * divider table. Returns false if the resulting VCO is out of range.
 */
static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2209 
2210 static bool
2211 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2212 {
2213 	struct bxt_clk_div clk_div = {};
2214 
2215 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2216 
2217 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2218 }
2219 
/*
 * Return the current output frequency of @pll, reconstructed from the
 * cached register values. m2 is rebuilt as a 22.22 fixed point number;
 * the fractional bits are only used when enabled via pll3.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	struct dpll clock;

	/* m1 is fixed to 2 on this platform */
	clock.m1 = 2;
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	/* BXT shares the CHV PLL model; nssc ref is 100 MHz (see below) */
	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2236 
/*
 * Compute the PLL state for @crtc and reserve its PLL. BXT has a fixed
 * 1:1 port-to-PLL mapping, so no search over a shared pool is needed.
 *
 * Returns false if no valid PLL configuration exists for the mode.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	/* record the new PLL reference in the atomic state */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2269 
/* BXT port PLLs use a fixed 100 MHz reference for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->dpll.ref_clks.ssc = 100000;
	i915->dpll.ref_clks.nssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}
2276 
2277 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2278 			      const struct intel_dpll_hw_state *hw_state)
2279 {
2280 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2281 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2282 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2283 		    hw_state->ebb0,
2284 		    hw_state->ebb4,
2285 		    hw_state->pll0,
2286 		    hw_state->pll1,
2287 		    hw_state->pll2,
2288 		    hw_state->pll3,
2289 		    hw_state->pll6,
2290 		    hw_state->pll8,
2291 		    hw_state->pll9,
2292 		    hw_state->pll10,
2293 		    hw_state->pcsdw12);
2294 }
2295 
/* Hooks shared by the three BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};

/* One PLL per port (see bxt_get_dpll()); the list is NULL-terminated. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

/* Top-level shared DPLL management ops for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2317 
/*
 * Enable @pll following the CNL bspec sequence: power the PLL up,
 * program CFGCR0 (and CFGCR1 for HDMI), then enable the PLL and wait
 * for it to lock. Timeouts are logged but not propagated.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* 3b. Configure DPLL_CFGCR1 with the HDMI/WRPLL dividers */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2385 
/*
 * Disable @pll, reversing cnl_ddi_pll_enable(): disable the PLL, then
 * remove its power. DDI clock gating and DVFS are handled elsewhere
 * (see the numbered notes below). Timeouts are logged only.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2434 
/*
 * Read the current CFGCR0/CFGCR1 hardware state of @pll into @hw_state.
 *
 * Returns false (leaving @hw_state untouched) if display power cannot
 * be acquired or the PLL is disabled. CFGCR1 is only read back in HDMI
 * mode to avoid caching stale values.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2470 
/*
 * Split @bestdiv into the raw pdiv/qdiv/kdiv multipliers so that
 * pdiv * qdiv * kdiv == bestdiv. For even dividers the checks are
 * ordered by preference, so e.g. 20 becomes 2 * 5 * 2 rather than
 * 5 * 2 * 2. The outputs are untouched for dividers not covered here.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2) {
		/* odd dividers: 3/5/7 directly, 9/15/21 via kdiv = 3 */
		*qdiv = 1;
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers — keep the original preference order */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2509 
/*
 * Encode the raw dividers (@pdiv, @qdiv, @kdiv) and the DCO ratio into
 * the register-level fields of @params. @dco_freq and @ref_freq must be
 * in the same units; the DCO ratio is stored as a 15 bit fixed point
 * fraction.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* the hardware uses a non-linear encoding for kdiv (3 -> 4) */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* pdiv is encoded one-hot: 2 -> 1, 3 -> 2, 5 -> 4, 7 -> 8 */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* qdiv != 1 is only meaningful together with kdiv == 2 */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* dco_freq / ref_freq as a 17.15 fixed point ratio */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2557 
2558 static bool
2559 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2560 			  struct skl_wrpll_params *wrpll_params,
2561 			  int ref_clock)
2562 {
2563 	u32 afe_clock = crtc_state->port_clock * 5;
2564 	u32 dco_min = 7998000;
2565 	u32 dco_max = 10000000;
2566 	u32 dco_mid = (dco_min + dco_max) / 2;
2567 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2568 					 18, 20, 24, 28, 30, 32,  36,  40,
2569 					 42, 44, 48, 50, 52, 54,  56,  60,
2570 					 64, 66, 68, 70, 72, 76,  78,  80,
2571 					 84, 88, 90, 92, 96, 98, 100, 102,
2572 					  3,  5,  7,  9, 15, 21 };
2573 	u32 dco, best_dco = 0, dco_centrality = 0;
2574 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2575 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2576 
2577 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2578 		dco = afe_clock * dividers[d];
2579 
2580 		if ((dco <= dco_max) && (dco >= dco_min)) {
2581 			dco_centrality = abs(dco - dco_mid);
2582 
2583 			if (dco_centrality < best_dco_centrality) {
2584 				best_dco_centrality = dco_centrality;
2585 				best_div = dividers[d];
2586 				best_dco = dco;
2587 			}
2588 		}
2589 	}
2590 
2591 	if (best_div == 0)
2592 		return false;
2593 
2594 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2595 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2596 				  pdiv, qdiv, kdiv);
2597 
2598 	return true;
2599 }
2600 
/* Compute WRPLL parameters for @crtc_state with the CNL non-SSC reference. */
static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
			struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
					 i915->dpll.ref_clks.nssc);
}
2610 
2611 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2612 {
2613 	u32 cfgcr0, cfgcr1;
2614 	struct skl_wrpll_params wrpll_params = { 0, };
2615 
2616 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2617 
2618 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2619 		return false;
2620 
2621 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2622 		wrpll_params.dco_integer;
2623 
2624 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2625 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2626 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2627 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2628 		DPLL_CFGCR1_CENTRAL_FREQ;
2629 
2630 	memset(&crtc_state->dpll_hw_state, 0,
2631 	       sizeof(crtc_state->dpll_hw_state));
2632 
2633 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2634 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2635 	return true;
2636 }
2637 
2638 /*
2639  * Display WA #22010492432: tgl
2640  * Program half of the nominal DCO divider fraction value.
2641  */
2642 static bool
2643 tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2644 {
2645 	return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400;
2646 }
2647 
/*
 * Reconstruct the port clock from the CFGCR0/CFGCR1 values cached for
 * @pll; this is the inverse of cnl_wrpll_params_populate(). The result
 * is in the same units as @ref_clock. Returns 0 on a bogus divider.
 */
static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				    const struct intel_shared_dpll *pll,
				    int ref_clock)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* the qdiv ratio only takes effect when qdiv mode is enabled */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* decode the register encodings back into the raw divider values */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Display WA #22010492432: the stored fraction is half the nominal value */
	if (tgl_combo_pll_div_frac_wa_needed(dev_priv))
		dco_fraction *= 2;

	/* the fraction is 15 bits (see cnl_wrpll_params_populate()) */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is port clock * 5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
2709 
/* WRPLL output frequency of @pll with the CNL non-SSC reference clock. */
static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
}
2715 
/*
 * Program crtc_state->dpll_hw_state for a DP (LC PLL) output: SSC is
 * enabled and the link rate is encoded directly into CFGCR0. An
 * unknown rate leaves the link rate field at zero.
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	/* port_clock is twice the per-lane symbol rate */
	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2760 
2761 static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
2762 				  const struct intel_shared_dpll *pll)
2763 {
2764 	int link_clock = 0;
2765 
2766 	switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
2767 	case DPLL_CFGCR0_LINK_RATE_810:
2768 		link_clock = 81000;
2769 		break;
2770 	case DPLL_CFGCR0_LINK_RATE_1080:
2771 		link_clock = 108000;
2772 		break;
2773 	case DPLL_CFGCR0_LINK_RATE_1350:
2774 		link_clock = 135000;
2775 		break;
2776 	case DPLL_CFGCR0_LINK_RATE_1620:
2777 		link_clock = 162000;
2778 		break;
2779 	case DPLL_CFGCR0_LINK_RATE_2160:
2780 		link_clock = 216000;
2781 		break;
2782 	case DPLL_CFGCR0_LINK_RATE_2700:
2783 		link_clock = 270000;
2784 		break;
2785 	case DPLL_CFGCR0_LINK_RATE_3240:
2786 		link_clock = 324000;
2787 		break;
2788 	case DPLL_CFGCR0_LINK_RATE_4050:
2789 		link_clock = 405000;
2790 		break;
2791 	default:
2792 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
2793 		break;
2794 	}
2795 
2796 	return link_clock * 2;
2797 }
2798 
/*
 * Compute the DPLL state for @crtc and reserve a matching shared DPLL
 * out of DPLL 0-2. Returns false if the state cannot be computed for
 * this output type or no suitable PLL is available.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		drm_dbg_kms(&i915->drm,
			    "Skip DPLL setup for output_types 0x%x\n",
			    crtc_state->output_types);
		return false;
	}

	/* any of the three DPLLs may serve any port */
	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		drm_dbg_kms(&i915->drm, "No PLL selected\n");
		return false;
	}

	/* record the new PLL reference in the atomic state */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2847 
2848 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2849 				const struct intel_shared_dpll *pll)
2850 {
2851 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2852 		return cnl_ddi_wrpll_get_freq(i915, pll);
2853 	else
2854 		return cnl_ddi_lcpll_get_freq(i915, pll);
2855 }
2856 
/* CNL DPLLs run off the cdclk reference; there is no SSC reference. */
static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC reference */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
2862 
/* Log the cached CNL PLL register values for debugging. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2871 
/* Hooks shared by the three CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
	.get_freq = cnl_ddi_pll_get_freq,
};

/* Pool of shareable DPLLs (see cnl_get_dpll()); NULL-terminated. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

/* Top-level shared DPLL management ops for CNL. */
static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = cnl_update_dpll_ref_clks,
	.dump_hw_state = cnl_dump_hw_state,
};
2893 
/* Pre-computed combo PHY PLL parameters for one DP link rate. */
struct icl_combo_pll_params {
	int clock;	/* port clock in kHz */
	struct skl_wrpll_params wrpll;
};

/*
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2929 
2930 
/*
 * Also used for 38.4 MHz values: the DPLL automatically halves a
 * 38.4 MHz reference (see icl_wrpll_ref_clock()).
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};

/* ICL TBT PLL parameters, selected by reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

/* TGL TBT PLL parameters, selected by reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused (left zero-initialized) */
};
2979 
2980 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2981 				  struct skl_wrpll_params *pll_params)
2982 {
2983 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2984 	const struct icl_combo_pll_params *params =
2985 		dev_priv->dpll.ref_clks.nssc == 24000 ?
2986 		icl_dp_combo_pll_24MHz_values :
2987 		icl_dp_combo_pll_19_2MHz_values;
2988 	int clock = crtc_state->port_clock;
2989 	int i;
2990 
2991 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2992 		if (clock == params[i].clock) {
2993 			*pll_params = params[i].wrpll;
2994 			return true;
2995 		}
2996 	}
2997 
2998 	MISSING_CASE(clock);
2999 	return false;
3000 }
3001 
/*
 * Select the pre-computed TBT PLL parameters for the current reference
 * clock; 38.4 MHz shares the 19.2 MHz values (the DPLL divides the
 * reference by two, see icl_wrpll_ref_clock()). Always succeeds;
 * unknown references warn and fall back to the 19.2 MHz values.
 */
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			     struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (INTEL_GEN(dev_priv) >= 12) {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			fallthrough;
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return true;
}
3037 
/*
 * Querying a single frequency from the TBT PLL is meaningless; always
 * warns and returns 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
3049 
3050 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3051 {
3052 	int ref_clock = i915->dpll.ref_clks.nssc;
3053 
3054 	/*
3055 	 * For ICL+, the spec states: if reference frequency is 38.4,
3056 	 * use 19.2 because the DPLL automatically divides that by 2.
3057 	 */
3058 	if (ref_clock == 38400)
3059 		ref_clock = 19200;
3060 
3061 	return ref_clock;
3062 }
3063 
3064 static bool
3065 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3066 	       struct skl_wrpll_params *wrpll_params)
3067 {
3068 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3069 
3070 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3071 					 icl_wrpll_ref_clock(i915));
3072 }
3073 
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll)
{
	int ref_clock = icl_wrpll_ref_clock(i915);

	/* The combo PHY PLL frequency follows the CNL WRPLL formula. */
	return __cnl_ddi_wrpll_get_freq(i915, pll, ref_clock);
}
3080 
3081 static void icl_calc_dpll_state(struct drm_i915_private *i915,
3082 				const struct skl_wrpll_params *pll_params,
3083 				struct intel_dpll_hw_state *pll_state)
3084 {
3085 	u32 dco_fraction = pll_params->dco_fraction;
3086 
3087 	memset(pll_state, 0, sizeof(*pll_state));
3088 
3089 	if (tgl_combo_pll_div_frac_wa_needed(i915))
3090 		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
3091 
3092 	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
3093 			    pll_params->dco_integer;
3094 
3095 	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3096 			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3097 			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3098 			    DPLL_CFGCR1_PDIV(pll_params->pdiv);
3099 
3100 	if (INTEL_GEN(i915) >= 12)
3101 		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3102 	else
3103 		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
3104 }
3105 
3106 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
3107 {
3108 	return id - DPLL_ID_ICL_MGPLL1;
3109 }
3110 
3111 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
3112 {
3113 	return tc_port + DPLL_ID_ICL_MGPLL1;
3114 }
3115 
3116 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3117 				     u32 *target_dco_khz,
3118 				     struct intel_dpll_hw_state *state,
3119 				     bool is_dkl)
3120 {
3121 	u32 dco_min_freq, dco_max_freq;
3122 	int div1_vals[] = {7, 5, 3, 2};
3123 	unsigned int i;
3124 	int div2;
3125 
3126 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3127 	dco_max_freq = is_dp ? 8100000 : 10000000;
3128 
3129 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3130 		int div1 = div1_vals[i];
3131 
3132 		for (div2 = 10; div2 > 0; div2--) {
3133 			int dco = div1 * div2 * clock_khz * 5;
3134 			int a_divratio, tlinedrv, inputsel;
3135 			u32 hsdiv;
3136 
3137 			if (dco < dco_min_freq || dco > dco_max_freq)
3138 				continue;
3139 
3140 			if (div2 >= 2) {
3141 				/*
3142 				 * Note: a_divratio not matching TGL BSpec
3143 				 * algorithm but matching hardcoded values and
3144 				 * working on HW for DP alt-mode at least
3145 				 */
3146 				a_divratio = is_dp ? 10 : 5;
3147 				tlinedrv = is_dkl ? 1 : 2;
3148 			} else {
3149 				a_divratio = 5;
3150 				tlinedrv = 0;
3151 			}
3152 			inputsel = is_dp ? 0 : 1;
3153 
3154 			switch (div1) {
3155 			default:
3156 				MISSING_CASE(div1);
3157 				fallthrough;
3158 			case 2:
3159 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3160 				break;
3161 			case 3:
3162 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3163 				break;
3164 			case 5:
3165 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3166 				break;
3167 			case 7:
3168 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3169 				break;
3170 			}
3171 
3172 			*target_dco_khz = dco;
3173 
3174 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3175 
3176 			state->mg_clktop2_coreclkctl1 =
3177 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3178 
3179 			state->mg_clktop2_hsclkctl =
3180 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3181 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3182 				hsdiv |
3183 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3184 
3185 			return true;
3186 		}
3187 	}
3188 
3189 	return false;
3190 }
3191 
3192 /*
3193  * The specification for this function uses real numbers, so the math had to be
3194  * adapted to integer-only calculation, that's why it looks so different.
3195  */
3196 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3197 				  struct intel_dpll_hw_state *pll_state)
3198 {
3199 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3200 	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3201 	int clock = crtc_state->port_clock;
3202 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3203 	u32 iref_ndiv, iref_trim, iref_pulse_w;
3204 	u32 prop_coeff, int_coeff;
3205 	u32 tdc_targetcnt, feedfwgain;
3206 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3207 	u64 tmp;
3208 	bool use_ssc = false;
3209 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3210 	bool is_dkl = INTEL_GEN(dev_priv) >= 12;
3211 
3212 	memset(pll_state, 0, sizeof(*pll_state));
3213 
3214 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3215 				      pll_state, is_dkl)) {
3216 		drm_dbg_kms(&dev_priv->drm,
3217 			    "Failed to find divisors for clock %d\n", clock);
3218 		return false;
3219 	}
3220 
3221 	m1div = 2;
3222 	m2div_int = dco_khz / (refclk_khz * m1div);
3223 	if (m2div_int > 255) {
3224 		if (!is_dkl) {
3225 			m1div = 4;
3226 			m2div_int = dco_khz / (refclk_khz * m1div);
3227 		}
3228 
3229 		if (m2div_int > 255) {
3230 			drm_dbg_kms(&dev_priv->drm,
3231 				    "Failed to find mdiv for clock %d\n",
3232 				    clock);
3233 			return false;
3234 		}
3235 	}
3236 	m2div_rem = dco_khz % (refclk_khz * m1div);
3237 
3238 	tmp = (u64)m2div_rem * (1 << 22);
3239 	do_div(tmp, refclk_khz * m1div);
3240 	m2div_frac = tmp;
3241 
3242 	switch (refclk_khz) {
3243 	case 19200:
3244 		iref_ndiv = 1;
3245 		iref_trim = 28;
3246 		iref_pulse_w = 1;
3247 		break;
3248 	case 24000:
3249 		iref_ndiv = 1;
3250 		iref_trim = 25;
3251 		iref_pulse_w = 2;
3252 		break;
3253 	case 38400:
3254 		iref_ndiv = 2;
3255 		iref_trim = 28;
3256 		iref_pulse_w = 1;
3257 		break;
3258 	default:
3259 		MISSING_CASE(refclk_khz);
3260 		return false;
3261 	}
3262 
3263 	/*
3264 	 * tdc_res = 0.000003
3265 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3266 	 *
3267 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3268 	 * was supposed to be a division, but we rearranged the operations of
3269 	 * the formula to avoid early divisions so we don't multiply the
3270 	 * rounding errors.
3271 	 *
3272 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3273 	 * we also rearrange to work with integers.
3274 	 *
3275 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3276 	 * last division by 10.
3277 	 */
3278 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3279 
3280 	/*
3281 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3282 	 * 32 bits. That's not a problem since we round the division down
3283 	 * anyway.
3284 	 */
3285 	feedfwgain = (use_ssc || m2div_rem > 0) ?
3286 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3287 
3288 	if (dco_khz >= 9000000) {
3289 		prop_coeff = 5;
3290 		int_coeff = 10;
3291 	} else {
3292 		prop_coeff = 4;
3293 		int_coeff = 8;
3294 	}
3295 
3296 	if (use_ssc) {
3297 		tmp = mul_u32_u32(dco_khz, 47 * 32);
3298 		do_div(tmp, refclk_khz * m1div * 10000);
3299 		ssc_stepsize = tmp;
3300 
3301 		tmp = mul_u32_u32(dco_khz, 1000);
3302 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3303 	} else {
3304 		ssc_stepsize = 0;
3305 		ssc_steplen = 0;
3306 	}
3307 	ssc_steplog = 4;
3308 
3309 	/* write pll_state calculations */
3310 	if (is_dkl) {
3311 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3312 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3313 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
3314 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3315 
3316 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3317 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3318 
3319 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3320 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3321 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3322 					(use_ssc ? DKL_PLL_SSC_EN : 0);
3323 
3324 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3325 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3326 
3327 		pll_state->mg_pll_tdc_coldst_bias =
3328 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3329 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3330 
3331 	} else {
3332 		pll_state->mg_pll_div0 =
3333 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3334 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3335 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
3336 
3337 		pll_state->mg_pll_div1 =
3338 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3339 			MG_PLL_DIV1_DITHER_DIV_2 |
3340 			MG_PLL_DIV1_NDIVRATIO(1) |
3341 			MG_PLL_DIV1_FBPREDIV(m1div);
3342 
3343 		pll_state->mg_pll_lf =
3344 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3345 			MG_PLL_LF_AFCCNTSEL_512 |
3346 			MG_PLL_LF_GAINCTRL(1) |
3347 			MG_PLL_LF_INT_COEFF(int_coeff) |
3348 			MG_PLL_LF_PROP_COEFF(prop_coeff);
3349 
3350 		pll_state->mg_pll_frac_lock =
3351 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3352 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3353 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3354 			MG_PLL_FRAC_LOCK_DCODITHEREN |
3355 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3356 		if (use_ssc || m2div_rem > 0)
3357 			pll_state->mg_pll_frac_lock |=
3358 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3359 
3360 		pll_state->mg_pll_ssc =
3361 			(use_ssc ? MG_PLL_SSC_EN : 0) |
3362 			MG_PLL_SSC_TYPE(2) |
3363 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3364 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
3365 			MG_PLL_SSC_FLLEN |
3366 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3367 
3368 		pll_state->mg_pll_tdc_coldst_bias =
3369 			MG_PLL_TDC_COLDST_COLDSTART |
3370 			MG_PLL_TDC_COLDST_IREFINT_EN |
3371 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3372 			MG_PLL_TDC_TDCOVCCORR_EN |
3373 			MG_PLL_TDC_TDCSEL(3);
3374 
3375 		pll_state->mg_pll_bias =
3376 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
3377 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3378 			MG_PLL_BIAS_BIAS_BONUS(10) |
3379 			MG_PLL_BIAS_BIASCAL_EN |
3380 			MG_PLL_BIAS_CTRIM(12) |
3381 			MG_PLL_BIAS_VREF_RDAC(4) |
3382 			MG_PLL_BIAS_IREFTRIM(iref_trim);
3383 
3384 		if (refclk_khz == 38400) {
3385 			pll_state->mg_pll_tdc_coldst_bias_mask =
3386 				MG_PLL_TDC_COLDST_COLDSTART;
3387 			pll_state->mg_pll_bias_mask = 0;
3388 		} else {
3389 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3390 			pll_state->mg_pll_bias_mask = -1U;
3391 		}
3392 
3393 		pll_state->mg_pll_tdc_coldst_bias &=
3394 			pll_state->mg_pll_tdc_coldst_bias_mask;
3395 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3396 	}
3397 
3398 	return true;
3399 }
3400 
/*
 * Compute the MG/DKL PHY PLL output frequency from the saved register
 * state by decoding the feedback dividers (m1, m2 integer + fraction) and
 * the post dividers (div1 = HSDIV, div2 = DSDIV), then evaluating
 * m1 * m2 * refclk / (5 * div1 * div2).  Returns 0 on an unexpected HSDIV
 * encoding.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* gen12+ (DKL) and gen11 (MG) store the dividers in different regs. */
	if (INTEL_GEN(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* div1 is encoded as a ratio selector, not a plain integer. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3472 
3473 /**
3474  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3475  * @crtc_state: state for the CRTC to select the DPLL for
3476  * @port_dpll_id: the active @port_dpll_id to select
3477  *
3478  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3479  * CRTC.
3480  */
3481 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3482 			      enum icl_port_dpll_id port_dpll_id)
3483 {
3484 	struct icl_port_dpll *port_dpll =
3485 		&crtc_state->icl_port_dplls[port_dpll_id];
3486 
3487 	crtc_state->shared_dpll = port_dpll->pll;
3488 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3489 }
3490 
3491 static void icl_update_active_dpll(struct intel_atomic_state *state,
3492 				   struct intel_crtc *crtc,
3493 				   struct intel_encoder *encoder)
3494 {
3495 	struct intel_crtc_state *crtc_state =
3496 		intel_atomic_get_new_crtc_state(state, crtc);
3497 	struct intel_digital_port *primary_port;
3498 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3499 
3500 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3501 		enc_to_mst(encoder)->primary :
3502 		enc_to_dig_port(encoder);
3503 
3504 	if (primary_port &&
3505 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3506 	     primary_port->tc_mode == TC_PORT_LEGACY))
3507 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3508 
3509 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3510 }
3511 
3512 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3513 {
3514 	if (!(i915->hti_state & HDPORT_ENABLED))
3515 		return 0;
3516 
3517 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3518 }
3519 
3520 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3521 				   struct intel_crtc *crtc,
3522 				   struct intel_encoder *encoder)
3523 {
3524 	struct intel_crtc_state *crtc_state =
3525 		intel_atomic_get_new_crtc_state(state, crtc);
3526 	struct skl_wrpll_params pll_params = { };
3527 	struct icl_port_dpll *port_dpll =
3528 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3529 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3530 	enum port port = encoder->port;
3531 	unsigned long dpll_mask;
3532 	int ret;
3533 
3534 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
3535 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
3536 		ret = icl_calc_wrpll(crtc_state, &pll_params);
3537 	else
3538 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
3539 
3540 	if (!ret) {
3541 		drm_dbg_kms(&dev_priv->drm,
3542 			    "Could not calculate combo PHY PLL state.\n");
3543 
3544 		return false;
3545 	}
3546 
3547 	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
3548 
3549 	if (IS_DG1(dev_priv)) {
3550 		if (port == PORT_D || port == PORT_E) {
3551 			dpll_mask =
3552 				BIT(DPLL_ID_DG1_DPLL2) |
3553 				BIT(DPLL_ID_DG1_DPLL3);
3554 		} else {
3555 			dpll_mask =
3556 				BIT(DPLL_ID_DG1_DPLL0) |
3557 				BIT(DPLL_ID_DG1_DPLL1);
3558 		}
3559 	} else if (IS_ROCKETLAKE(dev_priv)) {
3560 		dpll_mask =
3561 			BIT(DPLL_ID_EHL_DPLL4) |
3562 			BIT(DPLL_ID_ICL_DPLL1) |
3563 			BIT(DPLL_ID_ICL_DPLL0);
3564 	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
3565 		dpll_mask =
3566 			BIT(DPLL_ID_EHL_DPLL4) |
3567 			BIT(DPLL_ID_ICL_DPLL1) |
3568 			BIT(DPLL_ID_ICL_DPLL0);
3569 	} else {
3570 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3571 	}
3572 
3573 	/* Eliminate DPLLs from consideration if reserved by HTI */
3574 	dpll_mask &= ~intel_get_hti_plls(dev_priv);
3575 
3576 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3577 						&port_dpll->hw_state,
3578 						dpll_mask);
3579 	if (!port_dpll->pll) {
3580 		drm_dbg_kms(&dev_priv->drm,
3581 			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3582 			    encoder->base.base.id, encoder->base.name);
3583 		return false;
3584 	}
3585 
3586 	intel_reference_shared_dpll(state, crtc,
3587 				    port_dpll->pll, &port_dpll->hw_state);
3588 
3589 	icl_update_active_dpll(state, crtc, encoder);
3590 
3591 	return true;
3592 }
3593 
/*
 * Reserve the two PLLs a Type-C port may need: the TBT PLL as the default
 * instance, plus the per-port MG PHY PLL.  If the MG PHY step fails, the
 * TBT PLL reference taken earlier is dropped before returning false.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First reserve the TBT PLL as the default port DPLL. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then reserve the MG PHY PLL dedicated to this TC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Drop the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3654 
3655 static bool icl_get_dplls(struct intel_atomic_state *state,
3656 			  struct intel_crtc *crtc,
3657 			  struct intel_encoder *encoder)
3658 {
3659 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3660 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3661 
3662 	if (intel_phy_is_combo(dev_priv, phy))
3663 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3664 	else if (intel_phy_is_tc(dev_priv, phy))
3665 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3666 
3667 	MISSING_CASE(phy);
3668 
3669 	return false;
3670 }
3671 
3672 static void icl_put_dplls(struct intel_atomic_state *state,
3673 			  struct intel_crtc *crtc)
3674 {
3675 	const struct intel_crtc_state *old_crtc_state =
3676 		intel_atomic_get_old_crtc_state(state, crtc);
3677 	struct intel_crtc_state *new_crtc_state =
3678 		intel_atomic_get_new_crtc_state(state, crtc);
3679 	enum icl_port_dpll_id id;
3680 
3681 	new_crtc_state->shared_dpll = NULL;
3682 
3683 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3684 		const struct icl_port_dpll *old_port_dpll =
3685 			&old_crtc_state->icl_port_dplls[id];
3686 		struct icl_port_dpll *new_port_dpll =
3687 			&new_crtc_state->icl_port_dplls[id];
3688 
3689 		new_port_dpll->pll = NULL;
3690 
3691 		if (!old_port_dpll->pll)
3692 			continue;
3693 
3694 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3695 	}
3696 }
3697 
/*
 * Read out the current MG PHY PLL hardware state into @hw_state.  Returns
 * false if the display power domain could not be grabbed or the PLL is
 * disabled.  Reserved register bits are masked out so the read-out state
 * can be compared against software-computed values.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* Same refclk-dependent masks as computed in icl_calc_mg_pll_state(). */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3761 
/*
 * Read out the current DKL PHY (gen12+) PLL hardware state into @hw_state.
 * Returns false if the display power domain could not be grabbed or the
 * PLL is disabled.  Only the software-programmed bits are kept; the rest
 * are masked out for later state comparison.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3835 
/*
 * Read out CFGCR0/CFGCR1 for a combo or TBT PLL from the platform-specific
 * register locations.  @enable_reg is the PLL enable register to check
 * first.  Returns false if the display power domain is unavailable or the
 * PLL is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (INTEL_GEN(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* JSL/EHL DPLL4 lives at the index-4 ICL register offsets. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3887 
3888 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3889 				   struct intel_shared_dpll *pll,
3890 				   struct intel_dpll_hw_state *hw_state)
3891 {
3892 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3893 
3894 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3895 }
3896 
3897 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3898 				 struct intel_shared_dpll *pll,
3899 				 struct intel_dpll_hw_state *hw_state)
3900 {
3901 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3902 }
3903 
3904 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3905 			   struct intel_shared_dpll *pll)
3906 {
3907 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3908 	const enum intel_dpll_id id = pll->info->id;
3909 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3910 
3911 	if (IS_DG1(dev_priv)) {
3912 		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
3913 		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
3914 	} else if (IS_ROCKETLAKE(dev_priv)) {
3915 		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
3916 		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
3917 	} else if (INTEL_GEN(dev_priv) >= 12) {
3918 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3919 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3920 	} else {
3921 		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3922 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3923 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3924 		} else {
3925 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3926 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3927 		}
3928 	}
3929 
3930 	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3931 	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3932 	intel_de_posting_read(dev_priv, cfgcr1_reg);
3933 }
3934 
/*
 * Program an MG PHY PLL from the precomputed register values in
 * pll->state.hw_state.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by software; plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to make sure the writes have landed. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3985 
3986 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3987 			  struct intel_shared_dpll *pll)
3988 {
3989 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3990 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3991 	u32 val;
3992 
3993 	/*
3994 	 * All registers programmed here have the same HIP_INDEX_REG even
3995 	 * though on different building block
3996 	 */
3997 	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3998 		       HIP_INDEX_VAL(tc_port, 0x2));
3999 
4000 	/* All the registers are RMW */
4001 	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
4002 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
4003 	val |= hw_state->mg_refclkin_ctl;
4004 	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
4005 
4006 	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
4007 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
4008 	val |= hw_state->mg_clktop2_coreclkctl1;
4009 	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
4010 
4011 	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
4012 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
4013 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
4014 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
4015 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
4016 	val |= hw_state->mg_clktop2_hsclkctl;
4017 	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
4018 
4019 	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
4020 	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
4021 		 DKL_PLL_DIV0_PROP_COEFF_MASK |
4022 		 DKL_PLL_DIV0_FBPREDIV_MASK |
4023 		 DKL_PLL_DIV0_FBDIV_INT_MASK);
4024 	val |= hw_state->mg_pll_div0;
4025 	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
4026 
4027 	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
4028 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
4029 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
4030 	val |= hw_state->mg_pll_div1;
4031 	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
4032 
4033 	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
4034 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
4035 		 DKL_PLL_SSC_STEP_LEN_MASK |
4036 		 DKL_PLL_SSC_STEP_NUM_MASK |
4037 		 DKL_PLL_SSC_EN);
4038 	val |= hw_state->mg_pll_ssc;
4039 	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
4040 
4041 	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
4042 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
4043 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
4044 	val |= hw_state->mg_pll_bias;
4045 	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
4046 
4047 	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4048 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
4049 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
4050 	val |= hw_state->mg_pll_tdc_coldst_bias;
4051 	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
4052 
4053 	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4054 }
4055 
4056 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4057 				 struct intel_shared_dpll *pll,
4058 				 i915_reg_t enable_reg)
4059 {
4060 	u32 val;
4061 
4062 	val = intel_de_read(dev_priv, enable_reg);
4063 	val |= PLL_POWER_ENABLE;
4064 	intel_de_write(dev_priv, enable_reg, val);
4065 
4066 	/*
4067 	 * The spec says we need to "wait" but it also says it should be
4068 	 * immediate.
4069 	 */
4070 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4071 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4072 			pll->info->id);
4073 }
4074 
4075 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4076 			   struct intel_shared_dpll *pll,
4077 			   i915_reg_t enable_reg)
4078 {
4079 	u32 val;
4080 
4081 	val = intel_de_read(dev_priv, enable_reg);
4082 	val |= PLL_ENABLE;
4083 	intel_de_write(dev_priv, enable_reg, val);
4084 
4085 	/* Timeout is actually 600us. */
4086 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4087 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4088 }
4089 
4090 static void combo_pll_enable(struct drm_i915_private *dev_priv,
4091 			     struct intel_shared_dpll *pll)
4092 {
4093 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4094 
4095 	if (IS_JSL_EHL(dev_priv) &&
4096 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4097 
4098 		/*
4099 		 * We need to disable DC states when this DPLL is enabled.
4100 		 * This can be done by taking a reference on DPLL4 power
4101 		 * domain.
4102 		 */
4103 		pll->wakeref = intel_display_power_get(dev_priv,
4104 						       POWER_DOMAIN_DPLL_DC_OFF);
4105 	}
4106 
4107 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4108 
4109 	icl_dpll_write(dev_priv, pll);
4110 
4111 	/*
4112 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4113 	 * paths should already be setting the appropriate voltage, hence we do
4114 	 * nothing here.
4115 	 */
4116 
4117 	icl_pll_enable(dev_priv, pll, enable_reg);
4118 
4119 	/* DVFS post sequence would be here. See the comment above. */
4120 }
4121 
4122 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4123 			   struct intel_shared_dpll *pll)
4124 {
4125 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4126 
4127 	icl_dpll_write(dev_priv, pll);
4128 
4129 	/*
4130 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4131 	 * paths should already be setting the appropriate voltage, hence we do
4132 	 * nothing here.
4133 	 */
4134 
4135 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4136 
4137 	/* DVFS post sequence would be here. See the comment above. */
4138 }
4139 
4140 static void mg_pll_enable(struct drm_i915_private *dev_priv,
4141 			  struct intel_shared_dpll *pll)
4142 {
4143 	i915_reg_t enable_reg =
4144 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4145 
4146 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4147 
4148 	if (INTEL_GEN(dev_priv) >= 12)
4149 		dkl_pll_write(dev_priv, pll);
4150 	else
4151 		icl_mg_pll_write(dev_priv, pll);
4152 
4153 	/*
4154 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4155 	 * paths should already be setting the appropriate voltage, hence we do
4156 	 * nothing here.
4157 	 */
4158 
4159 	icl_pll_enable(dev_priv, pll, enable_reg);
4160 
4161 	/* DVFS post sequence would be here. See the comment above. */
4162 }
4163 
4164 static void icl_pll_disable(struct drm_i915_private *dev_priv,
4165 			    struct intel_shared_dpll *pll,
4166 			    i915_reg_t enable_reg)
4167 {
4168 	u32 val;
4169 
4170 	/* The first steps are done by intel_ddi_post_disable(). */
4171 
4172 	/*
4173 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4174 	 * paths should already be setting the appropriate voltage, hence we do
4175 	 * nothign here.
4176 	 */
4177 
4178 	val = intel_de_read(dev_priv, enable_reg);
4179 	val &= ~PLL_ENABLE;
4180 	intel_de_write(dev_priv, enable_reg, val);
4181 
4182 	/* Timeout is actually 1us. */
4183 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
4184 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
4185 
4186 	/* DVFS post sequence would be here. See the comment above. */
4187 
4188 	val = intel_de_read(dev_priv, enable_reg);
4189 	val &= ~PLL_POWER_ENABLE;
4190 	intel_de_write(dev_priv, enable_reg, val);
4191 
4192 	/*
4193 	 * The spec says we need to "wait" but it also says it should be
4194 	 * immediate.
4195 	 */
4196 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4197 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
4198 			pll->info->id);
4199 }
4200 
4201 static void combo_pll_disable(struct drm_i915_private *dev_priv,
4202 			      struct intel_shared_dpll *pll)
4203 {
4204 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4205 
4206 	icl_pll_disable(dev_priv, pll, enable_reg);
4207 
4208 	if (IS_JSL_EHL(dev_priv) &&
4209 	    pll->info->id == DPLL_ID_EHL_DPLL4)
4210 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4211 					pll->wakeref);
4212 }
4213 
/* Disable the TBT PLL via its single fixed enable register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
4219 
4220 static void mg_pll_disable(struct drm_i915_private *dev_priv,
4221 			   struct intel_shared_dpll *pll)
4222 {
4223 	i915_reg_t enable_reg =
4224 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4225 
4226 	icl_pll_disable(dev_priv, pll, enable_reg);
4227 }
4228 
/* Track the non-SSC DPLL reference clock from the CDCLK HW reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4234 
4235 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4236 			      const struct intel_dpll_hw_state *hw_state)
4237 {
4238 	drm_dbg_kms(&dev_priv->drm,
4239 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4240 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4241 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4242 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4243 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4244 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4245 		    hw_state->cfgcr0, hw_state->cfgcr1,
4246 		    hw_state->mg_refclkin_ctl,
4247 		    hw_state->mg_clktop2_coreclkctl1,
4248 		    hw_state->mg_clktop2_hsclkctl,
4249 		    hw_state->mg_pll_div0,
4250 		    hw_state->mg_pll_div1,
4251 		    hw_state->mg_pll_lf,
4252 		    hw_state->mg_pll_frac_lock,
4253 		    hw_state->mg_pll_ssc,
4254 		    hw_state->mg_pll_bias,
4255 		    hw_state->mg_pll_tdc_coldst_bias);
4256 }
4257 
/* Ops for combo PHY DPLLs (ICL+ DPLL0/1, EHL DPLL4, DG1 DPLLs). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Ops for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Ops for ICL MG PHY PLLs (one per Type-C port). */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4278 
/* Ice Lake PLL table: two combo DPLLs, the TBT PLL and four MG PLLs. */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4298 
/* Elkhart/Jasper Lake PLL table: combo DPLLs only. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

/* No .update_active_dpll: EHL/JSL have no Type-C ports to switch PLLs on. */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4313 
/*
 * Ops for Dekel PHY PLLs (gen12+ Type-C); enable/disable reuse the MG
 * paths, which dispatch to dkl_pll_write() on gen12+.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* Tiger Lake PLL table: two combo DPLLs, the TBT PLL and six TC PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4342 
/* Rocket Lake PLL table: combo DPLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4357 
/* DG1 PLL table: four combo DPLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4373 
4374 /**
4375  * intel_shared_dpll_init - Initialize shared DPLLs
4376  * @dev: drm device
4377  *
4378  * Initialize shared DPLLs for @dev.
4379  */
4380 void intel_shared_dpll_init(struct drm_device *dev)
4381 {
4382 	struct drm_i915_private *dev_priv = to_i915(dev);
4383 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4384 	const struct dpll_info *dpll_info;
4385 	int i;
4386 
4387 	if (IS_DG1(dev_priv))
4388 		dpll_mgr = &dg1_pll_mgr;
4389 	else if (IS_ROCKETLAKE(dev_priv))
4390 		dpll_mgr = &rkl_pll_mgr;
4391 	else if (INTEL_GEN(dev_priv) >= 12)
4392 		dpll_mgr = &tgl_pll_mgr;
4393 	else if (IS_JSL_EHL(dev_priv))
4394 		dpll_mgr = &ehl_pll_mgr;
4395 	else if (INTEL_GEN(dev_priv) >= 11)
4396 		dpll_mgr = &icl_pll_mgr;
4397 	else if (IS_CANNONLAKE(dev_priv))
4398 		dpll_mgr = &cnl_pll_mgr;
4399 	else if (IS_GEN9_BC(dev_priv))
4400 		dpll_mgr = &skl_pll_mgr;
4401 	else if (IS_GEN9_LP(dev_priv))
4402 		dpll_mgr = &bxt_pll_mgr;
4403 	else if (HAS_DDI(dev_priv))
4404 		dpll_mgr = &hsw_pll_mgr;
4405 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4406 		dpll_mgr = &pch_pll_mgr;
4407 
4408 	if (!dpll_mgr) {
4409 		dev_priv->dpll.num_shared_dpll = 0;
4410 		return;
4411 	}
4412 
4413 	dpll_info = dpll_mgr->dpll_info;
4414 
4415 	for (i = 0; dpll_info[i].name; i++) {
4416 		drm_WARN_ON(dev, i != dpll_info[i].id);
4417 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4418 	}
4419 
4420 	dev_priv->dpll.mgr = dpll_mgr;
4421 	dev_priv->dpll.num_shared_dpll = i;
4422 	mutex_init(&dev_priv->dpll.lock);
4423 
4424 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4425 }
4426 
4427 /**
4428  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4429  * @state: atomic state
4430  * @crtc: CRTC to reserve DPLLs for
4431  * @encoder: encoder
4432  *
4433  * This function reserves all required DPLLs for the given CRTC and encoder
4434  * combination in the current atomic commit @state and the new @crtc atomic
4435  * state.
4436  *
4437  * The new configuration in the atomic commit @state is made effective by
4438  * calling intel_shared_dpll_swap_state().
4439  *
4440  * The reserved DPLLs should be released by calling
4441  * intel_release_shared_dplls().
4442  *
4443  * Returns:
4444  * True if all required DPLLs were successfully reserved.
4445  */
4446 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4447 				struct intel_crtc *crtc,
4448 				struct intel_encoder *encoder)
4449 {
4450 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4451 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4452 
4453 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4454 		return false;
4455 
4456 	return dpll_mgr->get_dplls(state, crtc, encoder);
4457 }
4458 
4459 /**
4460  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4461  * @state: atomic state
4462  * @crtc: crtc from which the DPLLs are to be released
4463  *
4464  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4465  * from the current atomic commit @state and the old @crtc atomic state.
4466  *
4467  * The new configuration in the atomic commit @state is made effective by
4468  * calling intel_shared_dpll_swap_state().
4469  */
4470 void intel_release_shared_dplls(struct intel_atomic_state *state,
4471 				struct intel_crtc *crtc)
4472 {
4473 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4474 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4475 
4476 	/*
4477 	 * FIXME: this function is called for every platform having a
4478 	 * compute_clock hook, even though the platform doesn't yet support
4479 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4480 	 * called on those.
4481 	 */
4482 	if (!dpll_mgr)
4483 		return;
4484 
4485 	dpll_mgr->put_dplls(state, crtc);
4486 }
4487 
4488 /**
4489  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4490  * @state: atomic state
4491  * @crtc: the CRTC for which to update the active DPLL
4492  * @encoder: encoder determining the type of port DPLL
4493  *
4494  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4495  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4496  * DPLL selected will be based on the current mode of the encoder's port.
4497  */
4498 void intel_update_active_dpll(struct intel_atomic_state *state,
4499 			      struct intel_crtc *crtc,
4500 			      struct intel_encoder *encoder)
4501 {
4502 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4503 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4504 
4505 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4506 		return;
4507 
4508 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4509 }
4510 
4511 /**
4512  * intel_dpll_get_freq - calculate the DPLL's output frequency
4513  * @i915: i915 device
4514  * @pll: DPLL for which to calculate the output frequency
4515  *
4516  * Return the output frequency corresponding to @pll's current state.
4517  */
4518 int intel_dpll_get_freq(struct drm_i915_private *i915,
4519 			const struct intel_shared_dpll *pll)
4520 {
4521 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4522 		return 0;
4523 
4524 	return pll->info->funcs->get_freq(i915, pll);
4525 }
4526 
4527 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4528 				  struct intel_shared_dpll *pll)
4529 {
4530 	struct intel_crtc *crtc;
4531 
4532 	pll->on = pll->info->funcs->get_hw_state(i915, pll,
4533 						 &pll->state.hw_state);
4534 
4535 	if (IS_JSL_EHL(i915) && pll->on &&
4536 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4537 		pll->wakeref = intel_display_power_get(i915,
4538 						       POWER_DOMAIN_DPLL_DC_OFF);
4539 	}
4540 
4541 	pll->state.crtc_mask = 0;
4542 	for_each_intel_crtc(&i915->drm, crtc) {
4543 		struct intel_crtc_state *crtc_state =
4544 			to_intel_crtc_state(crtc->base.state);
4545 
4546 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4547 			pll->state.crtc_mask |= 1 << crtc->pipe;
4548 	}
4549 	pll->active_mask = pll->state.crtc_mask;
4550 
4551 	drm_dbg_kms(&i915->drm,
4552 		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
4553 		    pll->info->name, pll->state.crtc_mask, pll->on);
4554 }
4555 
/* Read out the HW state of all shared DPLLs on this device. */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	/* Refresh the reference clock values before the per-PLL readout. */
	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
		i915->dpll.mgr->update_ref_clks(i915);

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}
4566 
4567 static void sanitize_dpll_state(struct drm_i915_private *i915,
4568 				struct intel_shared_dpll *pll)
4569 {
4570 	if (!pll->on || pll->active_mask)
4571 		return;
4572 
4573 	drm_dbg_kms(&i915->drm,
4574 		    "%s enabled but not in use, disabling\n",
4575 		    pll->info->name);
4576 
4577 	pll->info->funcs->disable(i915, pll);
4578 	pll->on = false;
4579 }
4580 
/* Sanitize the state of all shared DPLLs after HW state readout. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
}
4588 
4589 /**
4590  * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
4591  * @dev_priv: i915 drm device
4592  * @hw_state: hw state to be written to the log
4593  *
4594  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4595  */
4596 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4597 			      const struct intel_dpll_hw_state *hw_state)
4598 {
4599 	if (dev_priv->dpll.mgr) {
4600 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4601 	} else {
4602 		/* fallback for platforms that don't use the shared dpll
4603 		 * infrastructure
4604 		 */
4605 		drm_dbg_kms(&dev_priv->drm,
4606 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4607 			    "fp0: 0x%x, fp1: 0x%x\n",
4608 			    hw_state->dpll,
4609 			    hw_state->dpll_md,
4610 			    hw_state->fp0,
4611 			    hw_state->fp1);
4612 	}
4613 }
4614