1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_de.h"
25 #include "intel_display_types.h"
26 #include "intel_dpio_phy.h"
27 #include "intel_dpll.h"
28 #include "intel_dpll_mgr.h"
29 
30 /**
31  * DOC: Display PLLs
32  *
33  * Display PLLs used for driving outputs vary by platform. While some have
34  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
35  * from a pool. In the latter scenario, it is possible that multiple pipes
36  * share a PLL if their configurations match.
37  *
38  * This file provides an abstraction over display PLLs. The function
39  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
40  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
42  * for a given CRTC and encoder configuration by calling
43  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
44  * with intel_release_shared_dplls().
45  * Changes to the users are first staged in the atomic state, and then made
46  * effective by calling intel_shared_dpll_swap_state() during the atomic
47  * commit phase.
48  */
49 
/*
 * Per-platform shared-DPLL management hooks; each supported platform
 * provides one of these together with its table of available PLLs.
 * Hooks other than get_dplls/put_dplls are optional (e.g. the PCH
 * manager below omits update_active_dpll and update_ref_clks).
 */
struct intel_dpll_mgr {
	/* table of this platform's PLLs, terminated by an empty entry */
	const struct dpll_info *dpll_info;

	/* reserve the PLL(s) needed by @crtc/@encoder in the atomic @state */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* release the PLL(s) previously reserved for @crtc */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* presumably switches which reserved PLL is active — confirm against platform implementations */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh the cached reference clock frequencies (dpll.ref_clks) */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log the platform-relevant members of @hw_state */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
65 
66 static void
67 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
68 				  struct intel_shared_dpll_state *shared_dpll)
69 {
70 	enum intel_dpll_id i;
71 
72 	/* Copy shared dpll state */
73 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
74 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
75 
76 		shared_dpll[i] = pll->state;
77 	}
78 }
79 
80 static struct intel_shared_dpll_state *
81 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
82 {
83 	struct intel_atomic_state *state = to_intel_atomic_state(s);
84 
85 	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
86 
87 	if (!state->dpll_set) {
88 		state->dpll_set = true;
89 
90 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
91 						  state->shared_dpll);
92 	}
93 
94 	return state->shared_dpll;
95 }
96 
97 /**
98  * intel_get_shared_dpll_by_id - get a DPLL given its id
99  * @dev_priv: i915 device instance
100  * @id: pll id
101  *
102  * Returns:
103  * A pointer to the DPLL with @id
104  */
105 struct intel_shared_dpll *
106 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
107 			    enum intel_dpll_id id)
108 {
109 	return &dev_priv->dpll.shared_dplls[id];
110 }
111 
112 /**
113  * intel_get_shared_dpll_id - get the id of a DPLL
114  * @dev_priv: i915 device instance
115  * @pll: the DPLL
116  *
117  * Returns:
118  * The id of @pll
119  */
120 enum intel_dpll_id
121 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
122 			 struct intel_shared_dpll *pll)
123 {
124 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
125 
126 	if (drm_WARN_ON(&dev_priv->drm,
127 			pll_idx < 0 ||
128 			pll_idx >= dev_priv->dpll.num_shared_dpll))
129 		return -1;
130 
131 	return pll_idx;
132 }
133 
/* For ILK+ */
/*
 * Warn if the hardware enable state of @pll does not match the expected
 * @state (true = enabled). Used by the enable/disable paths to verify
 * software bookkeeping against the hardware.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	/* Asserting on a NULL PLL is a caller bug; warn and bail. */
	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
151 
152 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
153 {
154 	return TC_PORT_1 + id - DPLL_ID_ICL_MGPLL1;
155 }
156 
157 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
158 {
159 	return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
160 }
161 
162 static i915_reg_t
163 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
164 			   struct intel_shared_dpll *pll)
165 {
166 	if (IS_DG1(i915))
167 		return DG1_DPLL_ENABLE(pll->info->id);
168 	else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
169 		return MG_PLL_ENABLE(0);
170 
171 	return CNL_DPLL_ENABLE(pll->info->id);
172 }
173 
174 static i915_reg_t
175 intel_tc_pll_enable_reg(struct drm_i915_private *i915,
176 			struct intel_shared_dpll *pll)
177 {
178 	const enum intel_dpll_id id = pll->info->id;
179 	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
180 
181 	if (IS_ALDERLAKE_P(i915))
182 		return ADLP_PORTTC_PLL_ENABLE(tc_port);
183 
184 	return MG_PLL_ENABLE(tc_port);
185 }
186 
187 /**
188  * intel_prepare_shared_dpll - call a dpll's prepare hook
189  * @crtc_state: CRTC, and its state, which has a shared dpll
190  *
191  * This calls the PLL's prepare hook if it has one and if the PLL is not
192  * already enabled. The prepare hook is platform specific.
193  */
194 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
195 {
196 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
197 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
198 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
199 
200 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
201 		return;
202 
203 	mutex_lock(&dev_priv->dpll.lock);
204 	drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
205 	if (!pll->active_mask) {
206 		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
207 		drm_WARN_ON(&dev_priv->drm, pll->on);
208 		assert_shared_dpll_disabled(dev_priv, pll);
209 
210 		pll->info->funcs->prepare(dev_priv, pll);
211 	}
212 	mutex_unlock(&dev_priv->dpll.lock);
213 }
214 
215 /**
216  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
217  * @crtc_state: CRTC, and its state, which has a shared DPLL
218  *
219  * Enable the shared DPLL used by @crtc.
220  */
221 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
222 {
223 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
224 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
225 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
226 	unsigned int pipe_mask = BIT(crtc->pipe);
227 	unsigned int old_mask;
228 
229 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
230 		return;
231 
232 	mutex_lock(&dev_priv->dpll.lock);
233 	old_mask = pll->active_mask;
234 
235 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
236 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
237 		goto out;
238 
239 	pll->active_mask |= pipe_mask;
240 
241 	drm_dbg_kms(&dev_priv->drm,
242 		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
243 		    pll->info->name, pll->active_mask, pll->on,
244 		    crtc->base.base.id, crtc->base.name);
245 
246 	if (old_mask) {
247 		drm_WARN_ON(&dev_priv->drm, !pll->on);
248 		assert_shared_dpll_enabled(dev_priv, pll);
249 		goto out;
250 	}
251 	drm_WARN_ON(&dev_priv->drm, pll->on);
252 
253 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
254 	pll->info->funcs->enable(dev_priv, pll);
255 	pll->on = true;
256 
257 out:
258 	mutex_unlock(&dev_priv->dpll.lock);
259 }
260 
261 /**
262  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
263  * @crtc_state: CRTC, and its state, which has a shared DPLL
264  *
265  * Disable the shared DPLL used by @crtc.
266  */
267 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
268 {
269 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
270 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
271 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
272 	unsigned int pipe_mask = BIT(crtc->pipe);
273 
274 	/* PCH only available on ILK+ */
275 	if (DISPLAY_VER(dev_priv) < 5)
276 		return;
277 
278 	if (pll == NULL)
279 		return;
280 
281 	mutex_lock(&dev_priv->dpll.lock);
282 	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
283 		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
284 		     crtc->base.base.id, crtc->base.name))
285 		goto out;
286 
287 	drm_dbg_kms(&dev_priv->drm,
288 		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
289 		    pll->info->name, pll->active_mask, pll->on,
290 		    crtc->base.base.id, crtc->base.name);
291 
292 	assert_shared_dpll_enabled(dev_priv, pll);
293 	drm_WARN_ON(&dev_priv->drm, !pll->on);
294 
295 	pll->active_mask &= ~pipe_mask;
296 	if (pll->active_mask)
297 		goto out;
298 
299 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
300 	pll->info->funcs->disable(dev_priv, pll);
301 	pll->on = false;
302 
303 out:
304 	mutex_unlock(&dev_priv->dpll.lock);
305 }
306 
/*
 * Search the PLLs in @dpll_mask for one usable by @crtc: prefer a PLL
 * whose staged hw state matches @pll_state exactly (so it can be
 * shared), otherwise fall back to the first PLL with no users at all.
 * Returns NULL when neither is available.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].pipe_mask == 0) {
			/* Remember the first unused PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		/* An in-use PLL can be shared only on an exact state match. */
		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].pipe_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
355 
356 static void
357 intel_reference_shared_dpll(struct intel_atomic_state *state,
358 			    const struct intel_crtc *crtc,
359 			    const struct intel_shared_dpll *pll,
360 			    const struct intel_dpll_hw_state *pll_state)
361 {
362 	struct drm_i915_private *i915 = to_i915(state->base.dev);
363 	struct intel_shared_dpll_state *shared_dpll;
364 	const enum intel_dpll_id id = pll->info->id;
365 
366 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
367 
368 	if (shared_dpll[id].pipe_mask == 0)
369 		shared_dpll[id].hw_state = *pll_state;
370 
371 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
372 		pipe_name(crtc->pipe));
373 
374 	shared_dpll[id].pipe_mask |= BIT(crtc->pipe);
375 }
376 
377 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
378 					  const struct intel_crtc *crtc,
379 					  const struct intel_shared_dpll *pll)
380 {
381 	struct intel_shared_dpll_state *shared_dpll;
382 
383 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
384 	shared_dpll[pll->info->id].pipe_mask &= ~BIT(crtc->pipe);
385 }
386 
387 static void intel_put_dpll(struct intel_atomic_state *state,
388 			   struct intel_crtc *crtc)
389 {
390 	const struct intel_crtc_state *old_crtc_state =
391 		intel_atomic_get_old_crtc_state(state, crtc);
392 	struct intel_crtc_state *new_crtc_state =
393 		intel_atomic_get_new_crtc_state(state, crtc);
394 
395 	new_crtc_state->shared_dpll = NULL;
396 
397 	if (!old_crtc_state->shared_dpll)
398 		return;
399 
400 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
401 }
402 
403 /**
404  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
405  * @state: atomic state
406  *
407  * This is the dpll version of drm_atomic_helper_swap_state() since the
408  * helper does not handle driver-specific global state.
409  *
410  * For consistency with atomic helpers this function does a complete swap,
411  * i.e. it also puts the current state into @state, even though there is no
412  * need for that at this moment.
413  */
414 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
415 {
416 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
417 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
418 	enum intel_dpll_id i;
419 
420 	if (!state->dpll_set)
421 		return;
422 
423 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
424 		struct intel_shared_dpll *pll =
425 			&dev_priv->dpll.shared_dplls[i];
426 
427 		swap(pll->state, shared_dpll[i]);
428 	}
429 }
430 
/*
 * Read the PCH DPLL and FP0/FP1 registers into @hw_state. Returns true
 * if the PLL is enabled in hardware; false if it is disabled or the
 * display power domain is off (registers not readable).
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Grab a power reference only if the domain is already enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
453 
/*
 * Program the FP0/FP1 divider registers; runs via the prepare hook,
 * which is only called while the PLL is not yet enabled (see
 * intel_prepare_shared_dpll()).
 */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
462 
/*
 * Warn unless at least one PCH reference clock (DREF) source is
 * enabled; checked before enabling a PCH DPLL (see ibx_pch_dpll_enable).
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	/* Only IBX/CPT PCHs have this register. */
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
475 
/* Enable a PCH DPLL and wait for its clocks to stabilize. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
499 
/* Disable a PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	/* brief wait after turning the PLL off, mirroring the enable path */
	udelay(200);
}
509 
510 static bool ibx_get_dpll(struct intel_atomic_state *state,
511 			 struct intel_crtc *crtc,
512 			 struct intel_encoder *encoder)
513 {
514 	struct intel_crtc_state *crtc_state =
515 		intel_atomic_get_new_crtc_state(state, crtc);
516 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
517 	struct intel_shared_dpll *pll;
518 	enum intel_dpll_id i;
519 
520 	if (HAS_PCH_IBX(dev_priv)) {
521 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
522 		i = (enum intel_dpll_id) crtc->pipe;
523 		pll = &dev_priv->dpll.shared_dplls[i];
524 
525 		drm_dbg_kms(&dev_priv->drm,
526 			    "[CRTC:%d:%s] using pre-allocated %s\n",
527 			    crtc->base.base.id, crtc->base.name,
528 			    pll->info->name);
529 	} else {
530 		pll = intel_find_shared_dpll(state, crtc,
531 					     &crtc_state->dpll_hw_state,
532 					     BIT(DPLL_ID_PCH_PLL_B) |
533 					     BIT(DPLL_ID_PCH_PLL_A));
534 	}
535 
536 	if (!pll)
537 		return false;
538 
539 	/* reference the pll */
540 	intel_reference_shared_dpll(state, crtc,
541 				    pll, &crtc_state->dpll_hw_state);
542 
543 	crtc_state->shared_dpll = pll;
544 
545 	return true;
546 }
547 
548 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
549 			      const struct intel_dpll_hw_state *hw_state)
550 {
551 	drm_dbg_kms(&dev_priv->drm,
552 		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
553 		    "fp0: 0x%x, fp1: 0x%x\n",
554 		    hw_state->dpll,
555 		    hw_state->dpll_md,
556 		    hw_state->fp0,
557 		    hw_state->fp1);
558 }
559 
/* PLL hooks shared by both IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
566 
/* The two PCH DPLLs; the table is terminated by an empty entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};
572 
/* DPLL manager for PCH (IBX/CPT) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
579 
/* Enable a HSW WRPLL by programming its control register. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	/* brief settle time after the enable write */
	udelay(20);
}
589 
/* Enable the SPLL by programming its control register. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	/* brief settle time after the enable write, as on the WRPLL path */
	udelay(20);
}
597 
/* Disable a WRPLL; may re-init the PCH refclk once nothing needs it. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
615 
/* Disable the SPLL; may re-init the PCH refclk once nothing needs it. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
633 
/*
 * Read a WRPLL's control register into @hw_state. Returns true if the
 * PLL is enabled; false if disabled or the display power domain is off
 * (register not readable).
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Grab a power reference only if the domain is already enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
654 
/*
 * Read the SPLL control register into @hw_state. Returns true if the
 * PLL is enabled; false if disabled or the display power domain is off
 * (register not readable).
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Grab a power reference only if the domain is already enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
674 
/* LC PLL reference, 2700 MHz (see hsw_ddi_wrpll_get_freq()) */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Post divider P: even values between P_MIN and P_MAX */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/*
 * Candidate divider triple; n2 and r2 are stored doubled (2*N, 2*R),
 * see hsw_ddi_calculate_wrpll(). p == 0 means "no candidate yet".
 */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
691 
/*
 * Return the PPM error budget for a WRPLL output frequency @clock (in
 * Hz). Frequencies listed in the table get their specific budget;
 * anything else defaults to 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{  25175000, 0 },    {  25200000, 0 },    {  27000000, 0 },
		{  27027000, 0 },    {  37762500, 0 },    {  37800000, 0 },
		{  40500000, 0 },    {  40541000, 0 },    {  54000000, 0 },
		{  54054000, 0 },    {  59341000, 0 },    {  59400000, 0 },
		{  72000000, 0 },    {  74176000, 0 },    {  74250000, 0 },
		{  81000000, 0 },    {  81081000, 0 },    {  89012000, 0 },
		{  89100000, 0 },    { 108000000, 0 },    { 108108000, 0 },
		{ 111264000, 0 },    { 111375000, 0 },    { 148352000, 0 },
		{ 148500000, 0 },    { 162000000, 0 },    { 162162000, 0 },
		{ 222525000, 0 },    { 222750000, 0 },    { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
765 
/*
 * Consider divider candidate (r2, n2, p) for target frequency @freq2k
 * and keep it in @best if it beats the current best candidate per the
 * PPM @budget rules described below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
825 
/*
 * Exhaustively search (r2, n2, p) divider triples that satisfy the REF
 * and VCO constraints, keeping the best candidate for @clock as judged
 * by hsw_wrpll_update_rnp(). Results are returned via the out
 * parameters; n2/r2 are doubled encodings (2*N, 2*R) as described below.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
890 
891 static struct intel_shared_dpll *
892 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
893 		       struct intel_crtc *crtc)
894 {
895 	struct intel_crtc_state *crtc_state =
896 		intel_atomic_get_new_crtc_state(state, crtc);
897 	struct intel_shared_dpll *pll;
898 	u32 val;
899 	unsigned int p, n2, r2;
900 
901 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
902 
903 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
904 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
905 	      WRPLL_DIVIDER_POST(p);
906 
907 	crtc_state->dpll_hw_state.wrpll = val;
908 
909 	pll = intel_find_shared_dpll(state, crtc,
910 				     &crtc_state->dpll_hw_state,
911 				     BIT(DPLL_ID_WRPLL2) |
912 				     BIT(DPLL_ID_WRPLL1));
913 
914 	if (!pll)
915 		return NULL;
916 
917 	return pll;
918 }
919 
/*
 * Calculate the port clock (in kHz) a WRPLL generates, from its control
 * register value and the reference clock that register selects.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll_state->wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		fallthrough;
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		/* Unknown reference select: warn and report 0 kHz. */
		MISSING_CASE(wrpll);
		return 0;
	}

	/* Extract the divider fields from the register value. */
	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
959 
960 static struct intel_shared_dpll *
961 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
962 {
963 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
964 	struct intel_shared_dpll *pll;
965 	enum intel_dpll_id pll_id;
966 	int clock = crtc_state->port_clock;
967 
968 	switch (clock / 2) {
969 	case 81000:
970 		pll_id = DPLL_ID_LCPLL_810;
971 		break;
972 	case 135000:
973 		pll_id = DPLL_ID_LCPLL_1350;
974 		break;
975 	case 270000:
976 		pll_id = DPLL_ID_LCPLL_2700;
977 		break;
978 	default:
979 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
980 			    clock);
981 		return NULL;
982 	}
983 
984 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
985 
986 	if (!pll)
987 		return NULL;
988 
989 	return pll;
990 }
991 
992 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
993 				  const struct intel_shared_dpll *pll,
994 				  const struct intel_dpll_hw_state *pll_state)
995 {
996 	int link_clock = 0;
997 
998 	switch (pll->info->id) {
999 	case DPLL_ID_LCPLL_810:
1000 		link_clock = 81000;
1001 		break;
1002 	case DPLL_ID_LCPLL_1350:
1003 		link_clock = 135000;
1004 		break;
1005 	case DPLL_ID_LCPLL_2700:
1006 		link_clock = 270000;
1007 		break;
1008 	default:
1009 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
1010 		break;
1011 	}
1012 
1013 	return link_clock * 2;
1014 }
1015 
1016 static struct intel_shared_dpll *
1017 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
1018 		      struct intel_crtc *crtc)
1019 {
1020 	struct intel_crtc_state *crtc_state =
1021 		intel_atomic_get_new_crtc_state(state, crtc);
1022 
1023 	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1024 		return NULL;
1025 
1026 	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
1027 					 SPLL_REF_MUXED_SSC;
1028 
1029 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1030 				      BIT(DPLL_ID_SPLL));
1031 }
1032 
1033 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1034 				 const struct intel_shared_dpll *pll,
1035 				 const struct intel_dpll_hw_state *pll_state)
1036 {
1037 	int link_clock = 0;
1038 
1039 	switch (pll_state->spll & SPLL_FREQ_MASK) {
1040 	case SPLL_FREQ_810MHz:
1041 		link_clock = 81000;
1042 		break;
1043 	case SPLL_FREQ_1350MHz:
1044 		link_clock = 135000;
1045 		break;
1046 	case SPLL_FREQ_2700MHz:
1047 		link_clock = 270000;
1048 		break;
1049 	default:
1050 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1051 		break;
1052 	}
1053 
1054 	return link_clock * 2;
1055 }
1056 
1057 static bool hsw_get_dpll(struct intel_atomic_state *state,
1058 			 struct intel_crtc *crtc,
1059 			 struct intel_encoder *encoder)
1060 {
1061 	struct intel_crtc_state *crtc_state =
1062 		intel_atomic_get_new_crtc_state(state, crtc);
1063 	struct intel_shared_dpll *pll;
1064 
1065 	memset(&crtc_state->dpll_hw_state, 0,
1066 	       sizeof(crtc_state->dpll_hw_state));
1067 
1068 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1069 		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1070 	else if (intel_crtc_has_dp_encoder(crtc_state))
1071 		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1072 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1073 		pll = hsw_ddi_spll_get_dpll(state, crtc);
1074 	else
1075 		return false;
1076 
1077 	if (!pll)
1078 		return false;
1079 
1080 	intel_reference_shared_dpll(state, crtc,
1081 				    pll, &crtc_state->dpll_hw_state);
1082 
1083 	crtc_state->shared_dpll = pll;
1084 
1085 	return true;
1086 }
1087 
1088 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1089 {
1090 	i915->dpll.ref_clks.ssc = 135000;
1091 	/* Non-SSC is only used on non-ULT HSW. */
1092 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1093 		i915->dpll.ref_clks.nssc = 24000;
1094 	else
1095 		i915->dpll.ref_clks.nssc = 135000;
1096 }
1097 
/* Log the captured HSW PLL state (WRPLL and SPLL control words). */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1104 
/* Ops for the two on-demand HSW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1111 
/* Ops for the single HSW SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1118 
/*
 * The HSW LCPLLs are registered INTEL_DPLL_ALWAYS_ON (see hsw_plls[]),
 * so there is nothing to do to enable them.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1123 
/* Always-on LCPLLs are never disabled; intentional no-op. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1128 
/*
 * LCPLLs have no per-PLL state to read back; report them as enabled
 * (hw_state is intentionally left untouched).
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1135 
/* Ops for the three fixed-rate, always-on HSW LCPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1142 
/* All PLLs available on HSW/BDW; LCPLLs are fixed-rate and always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },	/* sentinel */
};
1152 
/* Platform glue: HSW/BDW shared-DPLL manager. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1160 
/* Per-PLL register set on SKL: control + the two HDMI config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1164 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode, so no cfgcr1/cfgcr2 */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1191 
/*
 * Program this PLL's fields in the shared DPLL_CTRL1 register.
 *
 * Each PLL owns a 6-bit field in DPLL_CTRL1; the cached ctrl1 value is
 * stored unshifted (computed with DPLL id 0, see the hdmi/dp state
 * setup below), so shift it into place by id * 6 here.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* Clear all of this PLL's fields before merging the new value. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1208 
/*
 * Enable a SKL DPLL (1-3): program DPLL_CTRL1 and the CFGCR registers,
 * set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the PLL to report lock in DPLL_STATUS */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1229 
/*
 * DPLL0 is already running (it drives CDCLK, see
 * skl_ddi_dpll0_get_hw_state()); only its DPLL_CTRL1 fields are updated.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1235 
/* Disable a SKL DPLL (1-3) by clearing its control-register enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1247 
/* DPLL0 must stay running (it drives CDCLK); intentional no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1252 
/*
 * Read back the current hardware state of a SKL DPLL (1-3).
 *
 * Returns true and fills @hw_state if the PLL is enabled; returns false
 * if the display power well is off or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* Registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's 6-bit field from the shared DPLL_CTRL1. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1289 
/*
 * Read back DPLL0's state. Unlike the other SKL DPLLs, DPLL0 has no
 * HDMI mode, so only the DPLL_CTRL1 field is captured, and finding it
 * disabled is a WARN condition.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* Extract this PLL's 6-bit field from the shared DPLL_CTRL1. */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1322 
/* Best WRPLL divider candidate found so far during the search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1329 
1330 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1331 {
1332 	memset(ctx, 0, sizeof(*ctx));
1333 
1334 	ctx->min_deviation = U64_MAX;
1335 }
1336 
1337 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1338 #define SKL_DCO_MAX_PDEVIATION	100
1339 #define SKL_DCO_MAX_NDEVIATION	600
1340 
1341 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1342 				  u64 central_freq,
1343 				  u64 dco_freq,
1344 				  unsigned int divider)
1345 {
1346 	u64 deviation;
1347 
1348 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1349 			      central_freq);
1350 
1351 	/* positive deviation */
1352 	if (dco_freq >= central_freq) {
1353 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1354 		    deviation < ctx->min_deviation) {
1355 			ctx->min_deviation = deviation;
1356 			ctx->central_freq = central_freq;
1357 			ctx->dco_freq = dco_freq;
1358 			ctx->p = divider;
1359 		}
1360 	/* negative deviation */
1361 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1362 		   deviation < ctx->min_deviation) {
1363 		ctx->min_deviation = deviation;
1364 		ctx->central_freq = central_freq;
1365 		ctx->dco_freq = dco_freq;
1366 		ctx->p = divider;
1367 	}
1368 }
1369 
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	/*
	 * Decompose the overall divider p into the three cascaded stages
	 * so that p0 * p1 * p2 == p.  Outputs are left untouched for
	 * values of p not covered below.
	 */
	if (p % 2 == 0) {
		/* even dividers: factor out one 2, then split the rest */
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1418 
/*
 * WRPLL dividers in the encoded form written to DPLL_CFGCR1/CFGCR2
 * (see skl_ddi_hdmi_pll_dividers()).
 */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1428 
/*
 * Translate the chosen raw divider values (p0/p1/p2, central freq) into
 * the register-field encodings expected by DPLL_CFGCR1/CFGCR2.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* Encode the DCO central frequency selector. */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* Encode p0 as the PDIV field value. */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Encode p2 as the KDIV field value. */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1496 
/*
 * Search for WRPLL dividers for the requested pixel clock.
 *
 * Iterates even dividers first (preferred), then odd ones, over the
 * three permitted DCO central frequencies, keeping the candidate with
 * the smallest DCO deviation. Returns false if no divider satisfies
 * the +1%/-6% deviation window.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
1570 
/*
 * Compute and store the SKL DPLL state (ctrl1/cfgcr1/cfgcr2) for an
 * HDMI output. Returns false if no valid WRPLL dividers exist for the
 * requested port clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, the calculator wants Hz */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1608 
/*
 * Decode the WRPLL cfgcr1/cfgcr2 register state back into a port clock
 * (in kHz). Returns 0 on invalid register contents.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* QDIV ratio only applies when QDIV mode is enabled. */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	/* Decode the PDIV field into the actual p0 divider value. */
	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	/* Decode the KDIV field into the actual p2 divider value. */
	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* DCO frequency = (integer + fraction/0x8000) * ref clock. */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock = DCO / (p0*p1*p2); port clock = AFE / 5. */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1679 
/*
 * Store the SKL DPLL ctrl1 state for a DP/eDP output based on the
 * requested link rate (port_clock is twice the link clock).
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1719 
1720 static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1721 				  const struct intel_shared_dpll *pll,
1722 				  const struct intel_dpll_hw_state *pll_state)
1723 {
1724 	int link_clock = 0;
1725 
1726 	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1727 		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
1728 	case DPLL_CTRL1_LINK_RATE_810:
1729 		link_clock = 81000;
1730 		break;
1731 	case DPLL_CTRL1_LINK_RATE_1080:
1732 		link_clock = 108000;
1733 		break;
1734 	case DPLL_CTRL1_LINK_RATE_1350:
1735 		link_clock = 135000;
1736 		break;
1737 	case DPLL_CTRL1_LINK_RATE_1620:
1738 		link_clock = 162000;
1739 		break;
1740 	case DPLL_CTRL1_LINK_RATE_2160:
1741 		link_clock = 216000;
1742 		break;
1743 	case DPLL_CTRL1_LINK_RATE_2700:
1744 		link_clock = 270000;
1745 		break;
1746 	default:
1747 		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
1748 		break;
1749 	}
1750 
1751 	return link_clock * 2;
1752 }
1753 
1754 static bool skl_get_dpll(struct intel_atomic_state *state,
1755 			 struct intel_crtc *crtc,
1756 			 struct intel_encoder *encoder)
1757 {
1758 	struct intel_crtc_state *crtc_state =
1759 		intel_atomic_get_new_crtc_state(state, crtc);
1760 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1761 	struct intel_shared_dpll *pll;
1762 	bool bret;
1763 
1764 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1765 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1766 		if (!bret) {
1767 			drm_dbg_kms(&i915->drm,
1768 				    "Could not get HDMI pll dividers.\n");
1769 			return false;
1770 		}
1771 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
1772 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1773 		if (!bret) {
1774 			drm_dbg_kms(&i915->drm,
1775 				    "Could not set DP dpll HW state.\n");
1776 			return false;
1777 		}
1778 	} else {
1779 		return false;
1780 	}
1781 
1782 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1783 		pll = intel_find_shared_dpll(state, crtc,
1784 					     &crtc_state->dpll_hw_state,
1785 					     BIT(DPLL_ID_SKL_DPLL0));
1786 	else
1787 		pll = intel_find_shared_dpll(state, crtc,
1788 					     &crtc_state->dpll_hw_state,
1789 					     BIT(DPLL_ID_SKL_DPLL3) |
1790 					     BIT(DPLL_ID_SKL_DPLL2) |
1791 					     BIT(DPLL_ID_SKL_DPLL1));
1792 	if (!pll)
1793 		return false;
1794 
1795 	intel_reference_shared_dpll(state, crtc,
1796 				    pll, &crtc_state->dpll_hw_state);
1797 
1798 	crtc_state->shared_dpll = pll;
1799 
1800 	return true;
1801 }
1802 
1803 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1804 				const struct intel_shared_dpll *pll,
1805 				const struct intel_dpll_hw_state *pll_state)
1806 {
1807 	/*
1808 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1809 	 * the internal shift for each field
1810 	 */
1811 	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1812 		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1813 	else
1814 		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1815 }
1816 
/* SKL PLL reference is the CDCLK reference clock. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1822 
/* Log the captured SKL DPLL state (ctrl1/cfgcr1/cfgcr2) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1832 
/* Ops for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1839 
/* Ops for SKL DPLL0, which stays enabled because it drives CDCLK. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1846 
/* All PLLs available on SKL; DPLL0 is always on (drives CDCLK). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },	/* sentinel */
};
1854 
/* Platform glue: SKL shared-DPLL manager. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1862 
/*
 * Enable a BXT/GLK port PLL: program the cached divider/coefficient
 * state into the PHY PLL registers, enable the PLL and wait for lock,
 * then program the lane stagger configuration.
 *
 * The register write sequence is order-sensitive; keep it as-is.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK additionally requires powering the PLL before programming. */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	/* 200 us timeout for the PLL to report lock */
	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1983 
/*
 * Disable a BXT/GLK port PLL; on GLK also power it down and wait for
 * the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
2006 
/*
 * Read back the current hardware state of a BXT/GLK port PLL.
 *
 * Each captured register value is masked down to only the fields this
 * driver programs, so it can be compared against the software state.
 * Returns true and fills @hw_state if the PLL is enabled; false if the
 * display power well is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Registers are only accessible with display power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2086 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* port clock in kHz this entry applies to */
	u32 p1;			/* PORT_PLL_P1 divider */
	u32 p2;			/* PORT_PLL_P2 divider */
	u32 m2_int;		/* M2 feedback divider, integer part */
	u32 m2_frac;		/* M2 feedback divider, 22-bit fractional part */
	bool m2_frac_en;	/* enable the fractional part of M2 */
	u32 n;			/* PORT_PLL_N divider */

	int vco;		/* resulting VCO frequency in kHz */
};

/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	/* clock  p1 p2 m2_int m2_frac m2_frac_en n */
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2110 
2111 static bool
2112 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2113 			  struct bxt_clk_div *clk_div)
2114 {
2115 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2116 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2117 	struct dpll best_clock;
2118 
2119 	/* Calculate HDMI div */
2120 	/*
2121 	 * FIXME: tie the following calculation into
2122 	 * i9xx_crtc_compute_clock
2123 	 */
2124 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2125 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2126 			crtc_state->port_clock,
2127 			pipe_name(crtc->pipe));
2128 		return false;
2129 	}
2130 
2131 	clk_div->p1 = best_clock.p1;
2132 	clk_div->p2 = best_clock.p2;
2133 	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
2134 	clk_div->n = best_clock.n;
2135 	clk_div->m2_int = best_clock.m2 >> 22;
2136 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2137 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2138 
2139 	clk_div->vco = best_clock.vco;
2140 
2141 	return true;
2142 }
2143 
2144 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2145 				    struct bxt_clk_div *clk_div)
2146 {
2147 	int clock = crtc_state->port_clock;
2148 	int i;
2149 
2150 	*clk_div = bxt_dp_clk_val[0];
2151 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2152 		if (bxt_dp_clk_val[i].clock == clock) {
2153 			*clk_div = bxt_dp_clk_val[i];
2154 			break;
2155 		}
2156 	}
2157 
2158 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2159 }
2160 
/*
 * Translate the computed clock dividers into the register values stored
 * in crtc_state->dpll_hw_state. Returns false if the VCO frequency falls
 * outside the ranges covered by the coefficient selection below.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Select loop filter coefficients based on the VCO frequency (kHz). */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger setting depends on the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2230 
2231 static bool
2232 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2233 {
2234 	struct bxt_clk_div clk_div = {};
2235 
2236 	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2237 
2238 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2239 }
2240 
2241 static bool
2242 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2243 {
2244 	struct bxt_clk_div clk_div = {};
2245 
2246 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2247 
2248 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2249 }
2250 
/*
 * Compute the PLL output frequency (port clock in kHz) from the saved
 * register state. m2 is reassembled as a 22-bit fixed point value (the
 * inverse of the split done in bxt_ddi_hdmi_pll_dividers()) and the CHV
 * divider math is reused for the final calculation.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	clock.m1 = 2;
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2267 
/*
 * Compute the PLL state for the encoder's output type and reserve the
 * PLL for this CRTC. BXT has a fixed 1:1 port to PLL mapping, so no
 * search over a shared pool is needed.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2300 
2301 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2302 {
2303 	i915->dpll.ref_clks.ssc = 100000;
2304 	i915->dpll.ref_clks.nssc = 100000;
2305 	/* DSI non-SSC ref 19.2MHz */
2306 }
2307 
2308 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2309 			      const struct intel_dpll_hw_state *hw_state)
2310 {
2311 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2312 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2313 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2314 		    hw_state->ebb0,
2315 		    hw_state->ebb4,
2316 		    hw_state->pll0,
2317 		    hw_state->pll1,
2318 		    hw_state->pll2,
2319 		    hw_state->pll3,
2320 		    hw_state->pll6,
2321 		    hw_state->pll8,
2322 		    hw_state->pll9,
2323 		    hw_state->pll10,
2324 		    hw_state->pcsdw12);
2325 }
2326 
/* Hooks shared by all three BXT/GLK port PLLs */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};

/* One PLL per port A/B/C; the IDs reuse the SKL DPLL numbering */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2348 
/*
 * Enable a CNL DPLL following the documented steps 1-9. The DVFS-related
 * steps (5 and 8) are handled by the cdclk code, as noted inline.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* 3. Configure DPLL_CFGCR1 */
	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2416 
/*
 * Disable a CNL DPLL following the documented steps 1-7. The DVFS-related
 * steps (2 and 5) are handled by the cdclk code, as noted inline.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2465 
/*
 * Read out cfgcr0 (and cfgcr1 when HDMI mode is enabled) for a CNL DPLL.
 * Returns false if the PLL is disabled or display power is off.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2501 
/*
 * Split the overall divider into P, Q and K factors. The branch order
 * matters: it reproduces the spec's preferred factorizations (e.g. 20
 * becomes P=2/Q=5/K=2, not P=5/Q=2/K=2). Inputs that match no branch
 * leave the outputs untouched, same as before.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			/* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2540 
/*
 * Encode the logical pdiv/qdiv/kdiv values into their register field
 * encodings and convert the DCO frequency into 15-bit fixed point
 * relative to the reference clock (integer part + 15-bit fraction).
 * Unexpected divider values trigger a WARN but still leave the other
 * fields populated.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* A non-unity Q divider is only valid together with K == 2. */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2588 
2589 static bool
2590 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2591 			  struct skl_wrpll_params *wrpll_params,
2592 			  int ref_clock)
2593 {
2594 	u32 afe_clock = crtc_state->port_clock * 5;
2595 	u32 dco_min = 7998000;
2596 	u32 dco_max = 10000000;
2597 	u32 dco_mid = (dco_min + dco_max) / 2;
2598 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2599 					 18, 20, 24, 28, 30, 32,  36,  40,
2600 					 42, 44, 48, 50, 52, 54,  56,  60,
2601 					 64, 66, 68, 70, 72, 76,  78,  80,
2602 					 84, 88, 90, 92, 96, 98, 100, 102,
2603 					  3,  5,  7,  9, 15, 21 };
2604 	u32 dco, best_dco = 0, dco_centrality = 0;
2605 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2606 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2607 
2608 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2609 		dco = afe_clock * dividers[d];
2610 
2611 		if ((dco <= dco_max) && (dco >= dco_min)) {
2612 			dco_centrality = abs(dco - dco_mid);
2613 
2614 			if (dco_centrality < best_dco_centrality) {
2615 				best_dco_centrality = dco_centrality;
2616 				best_div = dividers[d];
2617 				best_dco = dco;
2618 			}
2619 		}
2620 	}
2621 
2622 	if (best_div == 0)
2623 		return false;
2624 
2625 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2626 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2627 				  pdiv, qdiv, kdiv);
2628 
2629 	return true;
2630 }
2631 
2632 static bool
2633 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2634 			struct skl_wrpll_params *wrpll_params)
2635 {
2636 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2637 
2638 	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2639 					 i915->dpll.ref_clks.nssc);
2640 }
2641 
2642 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2643 {
2644 	u32 cfgcr0, cfgcr1;
2645 	struct skl_wrpll_params wrpll_params = { 0, };
2646 
2647 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2648 
2649 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2650 		return false;
2651 
2652 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2653 		wrpll_params.dco_integer;
2654 
2655 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2656 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2657 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2658 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2659 		DPLL_CFGCR1_CENTRAL_FREQ;
2660 
2661 	memset(&crtc_state->dpll_hw_state, 0,
2662 	       sizeof(crtc_state->dpll_hw_state));
2663 
2664 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2665 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2666 	return true;
2667 }
2668 
2669 /*
2670  * Display WA #22010492432: ehl, tgl
2671  * Program half of the nominal DCO divider fraction value.
2672  */
2673 static bool
2674 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2675 {
2676 	return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2677 		 IS_JSL_EHL_REVID(i915, EHL_REVID_B0, REVID_FOREVER)) ||
2678 		 IS_TIGERLAKE(i915)) &&
2679 		 i915->dpll.ref_clks.nssc == 38400;
2680 }
2681 
/*
 * Calculate the WRPLL output frequency (port clock in kHz) from the
 * CFGCR0/CFGCR1 register values:
 *
 *   dco_freq   = (dco_integer + dco_fraction / 2^15) * ref_clock
 *   port clock = dco_freq / (p0 * p1 * p2 * 5)
 *
 * Returns 0 (with a WARN) if any decoded divider is zero.
 */
static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state,
				    int ref_clock)
{
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only applies when qdiv_mode is enabled. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;


	/* Decode the register field encodings back to divider values. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Display WA #22010492432 programmed half the fraction; undo it. */
	if (ehl_combo_pll_div_frac_wa_needed(dev_priv))
		dco_fraction *= 2;

	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}
2743 
2744 static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
2745 				  const struct intel_shared_dpll *pll,
2746 				  const struct intel_dpll_hw_state *pll_state)
2747 {
2748 	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
2749 					i915->dpll.ref_clks.nssc);
2750 }
2751 
/*
 * Prepare cfgcr0 for a DP output: SSC enable plus the link rate select
 * bits for the port clock.
 *
 * NOTE(review): a port clock not listed below leaves only SSC_ENABLE set
 * and still returns true - confirm callers only pass supported rates.
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2796 
/*
 * Map the link rate select bits in cfgcr0 back to the port clock in kHz
 * (the inverse of cnl_ddi_dp_set_dpll_hw_state()).
 */
static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch (pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
	case DPLL_CFGCR0_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2700:
		link_clock = 270000;
		break;
	case DPLL_CFGCR0_LINK_RATE_3240:
		link_clock = 324000;
		break;
	case DPLL_CFGCR0_LINK_RATE_4050:
		link_clock = 405000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
2835 
/*
 * Compute the PLL state for the encoder's output type and reserve a PLL
 * from the shared pool (DPLL0-2), preferring one whose current state
 * already matches.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		drm_dbg_kms(&i915->drm,
			    "Skip DPLL setup for output_types 0x%x\n",
			    crtc_state->output_types);
		return false;
	}

	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		drm_dbg_kms(&i915->drm, "No PLL selected\n");
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2884 
2885 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2886 				const struct intel_shared_dpll *pll,
2887 				const struct intel_dpll_hw_state *pll_state)
2888 {
2889 	if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2890 		return cnl_ddi_wrpll_get_freq(i915, pll, pll_state);
2891 	else
2892 		return cnl_ddi_lcpll_get_freq(i915, pll, pll_state);
2893 }
2894 
static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC reference; the DPLL reference is the cdclk reference clock */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
2900 
/* Log the CNL PLL register state for debugging. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2909 
/* Hooks shared by the three CNL DPLLs */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
	.get_freq = cnl_ddi_pll_get_freq,
};

/* Three shareable DPLLs, selected by best fit in cnl_get_dpll() */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = cnl_update_dpll_ref_clks,
	.dump_hw_state = cnl_dump_hw_state,
};
2931 
struct icl_combo_pll_params {
	int clock;			/* port clock in kHz */
	struct skl_wrpll_params wrpll;	/* pre-computed PLL parameters */
};
2936 
2937 /*
 * These values are already adjusted: they're the bits we write to the
2939  * registers, not the logical values.
2940  */
/* Pre-computed combo PHY DP PLL values for a 24 MHz reference clock */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};


/* Also used for 38.4 MHz values. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};

static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* pdiv/kdiv/qdiv_mode/qdiv_ratio are unused and stay zero-initialized */
};
3017 
3018 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
3019 				  struct skl_wrpll_params *pll_params)
3020 {
3021 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3022 	const struct icl_combo_pll_params *params =
3023 		dev_priv->dpll.ref_clks.nssc == 24000 ?
3024 		icl_dp_combo_pll_24MHz_values :
3025 		icl_dp_combo_pll_19_2MHz_values;
3026 	int clock = crtc_state->port_clock;
3027 	int i;
3028 
3029 	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
3030 		if (clock == params[i].clock) {
3031 			*pll_params = params[i].wrpll;
3032 			return true;
3033 		}
3034 	}
3035 
3036 	MISSING_CASE(clock);
3037 	return false;
3038 }
3039 
3040 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
3041 			     struct skl_wrpll_params *pll_params)
3042 {
3043 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3044 
3045 	if (DISPLAY_VER(dev_priv) >= 12) {
3046 		switch (dev_priv->dpll.ref_clks.nssc) {
3047 		default:
3048 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3049 			fallthrough;
3050 		case 19200:
3051 		case 38400:
3052 			*pll_params = tgl_tbt_pll_19_2MHz_values;
3053 			break;
3054 		case 24000:
3055 			*pll_params = tgl_tbt_pll_24MHz_values;
3056 			break;
3057 		}
3058 	} else {
3059 		switch (dev_priv->dpll.ref_clks.nssc) {
3060 		default:
3061 			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3062 			fallthrough;
3063 		case 19200:
3064 		case 38400:
3065 			*pll_params = icl_tbt_pll_19_2MHz_values;
3066 			break;
3067 		case 24000:
3068 			*pll_params = icl_tbt_pll_24MHz_values;
3069 			break;
3070 		}
3071 	}
3072 
3073 	return true;
3074 }
3075 
/*
 * "Get freq" stub for the TBT PLL: it has no single output frequency, so
 * being asked for one is a programming error (hence the WARN).
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
3088 
3089 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3090 {
3091 	int ref_clock = i915->dpll.ref_clks.nssc;
3092 
3093 	/*
3094 	 * For ICL+, the spec states: if reference frequency is 38.4,
3095 	 * use 19.2 because the DPLL automatically divides that by 2.
3096 	 */
3097 	if (ref_clock == 38400)
3098 		ref_clock = 19200;
3099 
3100 	return ref_clock;
3101 }
3102 
/*
 * Compute WRPLL (combo PHY DPLL) parameters for the CRTC's port clock,
 * reusing the CNL WRPLL algorithm with the ICL-adjusted reference clock.
 */
static bool
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
					 icl_wrpll_ref_clock(i915));
}
3112 
/*
 * Decode the combo PHY DPLL output frequency from @pll_state, reusing the
 * CNL WRPLL decode with the ICL-adjusted reference clock.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
					icl_wrpll_ref_clock(i915));
}
3120 
/*
 * Translate computed WRPLL parameters into combo PHY DPLL CFGCR0/CFGCR1
 * register values; all other fields of @pll_state are zeroed.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	memset(pll_state, 0, sizeof(*pll_state));

	/* EHL combo PHY workaround: program half the DCO fraction value. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* The CFGCR1 clock-select field differs between TGL+ and ICL. */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
}
3145 
3146 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3147 				     u32 *target_dco_khz,
3148 				     struct intel_dpll_hw_state *state,
3149 				     bool is_dkl)
3150 {
3151 	u32 dco_min_freq, dco_max_freq;
3152 	int div1_vals[] = {7, 5, 3, 2};
3153 	unsigned int i;
3154 	int div2;
3155 
3156 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3157 	dco_max_freq = is_dp ? 8100000 : 10000000;
3158 
3159 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3160 		int div1 = div1_vals[i];
3161 
3162 		for (div2 = 10; div2 > 0; div2--) {
3163 			int dco = div1 * div2 * clock_khz * 5;
3164 			int a_divratio, tlinedrv, inputsel;
3165 			u32 hsdiv;
3166 
3167 			if (dco < dco_min_freq || dco > dco_max_freq)
3168 				continue;
3169 
3170 			if (div2 >= 2) {
3171 				/*
3172 				 * Note: a_divratio not matching TGL BSpec
3173 				 * algorithm but matching hardcoded values and
3174 				 * working on HW for DP alt-mode at least
3175 				 */
3176 				a_divratio = is_dp ? 10 : 5;
3177 				tlinedrv = is_dkl ? 1 : 2;
3178 			} else {
3179 				a_divratio = 5;
3180 				tlinedrv = 0;
3181 			}
3182 			inputsel = is_dp ? 0 : 1;
3183 
3184 			switch (div1) {
3185 			default:
3186 				MISSING_CASE(div1);
3187 				fallthrough;
3188 			case 2:
3189 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3190 				break;
3191 			case 3:
3192 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3193 				break;
3194 			case 5:
3195 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3196 				break;
3197 			case 7:
3198 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3199 				break;
3200 			}
3201 
3202 			*target_dco_khz = dco;
3203 
3204 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3205 
3206 			state->mg_clktop2_coreclkctl1 =
3207 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3208 
3209 			state->mg_clktop2_hsclkctl =
3210 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3211 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3212 				hsdiv |
3213 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3214 
3215 			return true;
3216 		}
3217 	}
3218 
3219 	return false;
3220 }
3221 
3222 /*
3223  * The specification for this function uses real numbers, so the math had to be
3224  * adapted to integer-only calculation, that's why it looks so different.
3225  */
/*
 * Compute the MG (ICL) / DKL (TGL+) PHY PLL register state for the CRTC's
 * port clock. Returns false if no suitable dividers or feedback values
 * exist for the requested clock.
 */
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				  struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;

	memset(pll_state, 0, sizeof(*pll_state));

	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				      pll_state, is_dkl)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to find divisors for clock %d\n", clock);
		return false;
	}

	/*
	 * Feedback dividers: m1 (pre-divider) and m2 (integer + fractional
	 * parts). The m2 integer part must fit in 8 bits; on pre-DKL a
	 * larger m1 may be tried to bring it into range.
	 */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255) {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to find mdiv for clock %d\n",
				    clock);
			return false;
		}
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* m2 fractional part in 22-bit fixed point */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* current-reference settings depend on the reference clock */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return false;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* loop filter coefficients depend on the DCO frequency */
	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	/* use_ssc is currently always false, so this stays zeroed */
	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * With a 38.4 MHz refclk only a subset of these register
		 * fields is programmed; the masks are consumed by
		 * icl_mg_pll_write() for RMW programming.
		 */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return true;
}
3430 
/*
 * Decode the MG (ICL) / DKL (TGL+) PHY PLL output frequency (in kHz) from
 * the register values in @pll_state. Returns 0 on an unexpected HSDIV
 * ratio encoding.
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* feedback dividers live in different registers on DKL vs. MG */
	if (DISPLAY_VER(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* decode the HSDIV ratio field back to the div1 value */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3502 
3503 /**
3504  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3505  * @crtc_state: state for the CRTC to select the DPLL for
3506  * @port_dpll_id: the active @port_dpll_id to select
3507  *
3508  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3509  * CRTC.
3510  */
3511 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3512 			      enum icl_port_dpll_id port_dpll_id)
3513 {
3514 	struct icl_port_dpll *port_dpll =
3515 		&crtc_state->icl_port_dplls[port_dpll_id];
3516 
3517 	crtc_state->shared_dpll = port_dpll->pll;
3518 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3519 }
3520 
3521 static void icl_update_active_dpll(struct intel_atomic_state *state,
3522 				   struct intel_crtc *crtc,
3523 				   struct intel_encoder *encoder)
3524 {
3525 	struct intel_crtc_state *crtc_state =
3526 		intel_atomic_get_new_crtc_state(state, crtc);
3527 	struct intel_digital_port *primary_port;
3528 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3529 
3530 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3531 		enc_to_mst(encoder)->primary :
3532 		enc_to_dig_port(encoder);
3533 
3534 	if (primary_port &&
3535 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3536 	     primary_port->tc_mode == TC_PORT_LEGACY))
3537 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3538 
3539 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3540 }
3541 
3542 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3543 {
3544 	if (!(i915->hti_state & HDPORT_ENABLED))
3545 		return 0;
3546 
3547 	return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
3548 }
3549 
/*
 * Compute the DPLL state for a combo PHY port and reserve a suitable combo
 * DPLL for the CRTC from the platform-specific candidate set. Returns false
 * if the state can't be computed or no PLL is available.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need the WRPLL algorithm, DP uses precomputed tables */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* the candidate PLL set differs per platform and, on some, per port */
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3629 
/*
 * Reserve both PLLs a Type-C port may need: the TBT PLL (default) and the
 * MG/DKL PHY PLL. The active one is then selected based on the current TC
 * port mode. On failure nothing stays reserved and false is returned.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* first reserve the TBT PLL for the default port DPLL slot */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* then the MG/DKL PHY PLL dedicated to this TC port */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* roll back the TBT PLL reservation taken above */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3690 
3691 static bool icl_get_dplls(struct intel_atomic_state *state,
3692 			  struct intel_crtc *crtc,
3693 			  struct intel_encoder *encoder)
3694 {
3695 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3696 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3697 
3698 	if (intel_phy_is_combo(dev_priv, phy))
3699 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3700 	else if (intel_phy_is_tc(dev_priv, phy))
3701 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3702 
3703 	MISSING_CASE(phy);
3704 
3705 	return false;
3706 }
3707 
3708 static void icl_put_dplls(struct intel_atomic_state *state,
3709 			  struct intel_crtc *crtc)
3710 {
3711 	const struct intel_crtc_state *old_crtc_state =
3712 		intel_atomic_get_old_crtc_state(state, crtc);
3713 	struct intel_crtc_state *new_crtc_state =
3714 		intel_atomic_get_new_crtc_state(state, crtc);
3715 	enum icl_port_dpll_id id;
3716 
3717 	new_crtc_state->shared_dpll = NULL;
3718 
3719 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3720 		const struct icl_port_dpll *old_port_dpll =
3721 			&old_crtc_state->icl_port_dplls[id];
3722 		struct icl_port_dpll *new_port_dpll =
3723 			&new_crtc_state->icl_port_dplls[id];
3724 
3725 		new_port_dpll->pll = NULL;
3726 
3727 		if (!old_port_dpll->pll)
3728 			continue;
3729 
3730 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3731 	}
3732 }
3733 
/*
 * Read out the MG PHY PLL hardware state. Register values are masked down
 * to the fields the driver actually programs, since some registers carry
 * reserved fields that are written via RMW (see icl_mg_pll_write()).
 * Returns false if display power is off or the PLL is disabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/* same refclk-dependent masking as in icl_calc_mg_pll_state() */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3799 
/*
 * Read out the DKL (TGL+) PHY PLL hardware state, masking register values
 * down to the fields the driver programs. Returns false if display power is
 * off or the PLL is disabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3873 
/*
 * Common combo/TBT PLL readout: read CFGCR0/CFGCR1 from the
 * platform-specific register locations. Returns false if display power is
 * off or the PLL (per @enable_reg) is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* JSL/EHL DPLL4 uses the ICL CFGCR registers at index 4 */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3928 
3929 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3930 				   struct intel_shared_dpll *pll,
3931 				   struct intel_dpll_hw_state *hw_state)
3932 {
3933 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3934 
3935 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3936 }
3937 
/* TBT PLL readout: the enable register is the fixed TBT_PLL_ENABLE. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3944 
/*
 * Program the CFGCR0/CFGCR1 values from pll->state.hw_state to the
 * platform-specific register offsets for this DPLL; the final posting read
 * flushes the writes.
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* JSL/EHL DPLL4 uses the ICL CFGCR registers at index 4 */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
3978 
3979 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3980 			     struct intel_shared_dpll *pll)
3981 {
3982 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3983 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3984 	u32 val;
3985 
3986 	/*
3987 	 * Some of the following registers have reserved fields, so program
3988 	 * these with RMW based on a mask. The mask can be fixed or generated
3989 	 * during the calc/readout phase if the mask depends on some other HW
3990 	 * state like refclk, see icl_calc_mg_pll_state().
3991 	 */
3992 	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3993 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3994 	val |= hw_state->mg_refclkin_ctl;
3995 	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3996 
3997 	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3998 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3999 	val |= hw_state->mg_clktop2_coreclkctl1;
4000 	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
4001 
4002 	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
4003 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
4004 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
4005 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
4006 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
4007 	val |= hw_state->mg_clktop2_hsclkctl;
4008 	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
4009 
4010 	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
4011 	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
4012 	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
4013 	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
4014 		       hw_state->mg_pll_frac_lock);
4015 	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
4016 
4017 	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
4018 	val &= ~hw_state->mg_pll_bias_mask;
4019 	val |= hw_state->mg_pll_bias;
4020 	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
4021 
4022 	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
4023 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
4024 	val |= hw_state->mg_pll_tdc_coldst_bias;
4025 	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
4026 
4027 	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
4028 }
4029 
4030 static void dkl_pll_write(struct drm_i915_private *dev_priv,
4031 			  struct intel_shared_dpll *pll)
4032 {
4033 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
4034 	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
4035 	u32 val;
4036 
4037 	/*
4038 	 * All registers programmed here have the same HIP_INDEX_REG even
4039 	 * though on different building block
4040 	 */
4041 	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
4042 		       HIP_INDEX_VAL(tc_port, 0x2));
4043 
4044 	/* All the registers are RMW */
4045 	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
4046 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
4047 	val |= hw_state->mg_refclkin_ctl;
4048 	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
4049 
4050 	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
4051 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
4052 	val |= hw_state->mg_clktop2_coreclkctl1;
4053 	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
4054 
4055 	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
4056 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
4057 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
4058 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
4059 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
4060 	val |= hw_state->mg_clktop2_hsclkctl;
4061 	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
4062 
4063 	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
4064 	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
4065 		 DKL_PLL_DIV0_PROP_COEFF_MASK |
4066 		 DKL_PLL_DIV0_FBPREDIV_MASK |
4067 		 DKL_PLL_DIV0_FBDIV_INT_MASK);
4068 	val |= hw_state->mg_pll_div0;
4069 	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
4070 
4071 	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
4072 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
4073 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
4074 	val |= hw_state->mg_pll_div1;
4075 	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
4076 
4077 	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
4078 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
4079 		 DKL_PLL_SSC_STEP_LEN_MASK |
4080 		 DKL_PLL_SSC_STEP_NUM_MASK |
4081 		 DKL_PLL_SSC_EN);
4082 	val |= hw_state->mg_pll_ssc;
4083 	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
4084 
4085 	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
4086 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
4087 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
4088 	val |= hw_state->mg_pll_bias;
4089 	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
4090 
4091 	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4092 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
4093 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
4094 	val |= hw_state->mg_pll_tdc_coldst_bias;
4095 	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
4096 
4097 	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
4098 }
4099 
4100 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4101 				 struct intel_shared_dpll *pll,
4102 				 i915_reg_t enable_reg)
4103 {
4104 	u32 val;
4105 
4106 	val = intel_de_read(dev_priv, enable_reg);
4107 	val |= PLL_POWER_ENABLE;
4108 	intel_de_write(dev_priv, enable_reg, val);
4109 
4110 	/*
4111 	 * The spec says we need to "wait" but it also says it should be
4112 	 * immediate.
4113 	 */
4114 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4115 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4116 			pll->info->id);
4117 }
4118 
4119 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4120 			   struct intel_shared_dpll *pll,
4121 			   i915_reg_t enable_reg)
4122 {
4123 	u32 val;
4124 
4125 	val = intel_de_read(dev_priv, enable_reg);
4126 	val |= PLL_ENABLE;
4127 	intel_de_write(dev_priv, enable_reg, val);
4128 
4129 	/* Timeout is actually 600us. */
4130 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4131 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4132 }
4133 
4134 static void combo_pll_enable(struct drm_i915_private *dev_priv,
4135 			     struct intel_shared_dpll *pll)
4136 {
4137 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4138 
4139 	if (IS_JSL_EHL(dev_priv) &&
4140 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4141 
4142 		/*
4143 		 * We need to disable DC states when this DPLL is enabled.
4144 		 * This can be done by taking a reference on DPLL4 power
4145 		 * domain.
4146 		 */
4147 		pll->wakeref = intel_display_power_get(dev_priv,
4148 						       POWER_DOMAIN_DPLL_DC_OFF);
4149 	}
4150 
4151 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4152 
4153 	icl_dpll_write(dev_priv, pll);
4154 
4155 	/*
4156 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4157 	 * paths should already be setting the appropriate voltage, hence we do
4158 	 * nothing here.
4159 	 */
4160 
4161 	icl_pll_enable(dev_priv, pll, enable_reg);
4162 
4163 	/* DVFS post sequence would be here. See the comment above. */
4164 }
4165 
4166 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4167 			   struct intel_shared_dpll *pll)
4168 {
4169 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4170 
4171 	icl_dpll_write(dev_priv, pll);
4172 
4173 	/*
4174 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4175 	 * paths should already be setting the appropriate voltage, hence we do
4176 	 * nothing here.
4177 	 */
4178 
4179 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4180 
4181 	/* DVFS post sequence would be here. See the comment above. */
4182 }
4183 
4184 static void mg_pll_enable(struct drm_i915_private *dev_priv,
4185 			  struct intel_shared_dpll *pll)
4186 {
4187 	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
4188 
4189 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4190 
4191 	if (DISPLAY_VER(dev_priv) >= 12)
4192 		dkl_pll_write(dev_priv, pll);
4193 	else
4194 		icl_mg_pll_write(dev_priv, pll);
4195 
4196 	/*
4197 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4198 	 * paths should already be setting the appropriate voltage, hence we do
4199 	 * nothing here.
4200 	 */
4201 
4202 	icl_pll_enable(dev_priv, pll, enable_reg);
4203 
4204 	/* DVFS post sequence would be here. See the comment above. */
4205 }
4206 
4207 static void icl_pll_disable(struct drm_i915_private *dev_priv,
4208 			    struct intel_shared_dpll *pll,
4209 			    i915_reg_t enable_reg)
4210 {
4211 	u32 val;
4212 
4213 	/* The first steps are done by intel_ddi_post_disable(). */
4214 
4215 	/*
4216 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4217 	 * paths should already be setting the appropriate voltage, hence we do
4218 	 * nothing here.
4219 	 */
4220 
4221 	val = intel_de_read(dev_priv, enable_reg);
4222 	val &= ~PLL_ENABLE;
4223 	intel_de_write(dev_priv, enable_reg, val);
4224 
4225 	/* Timeout is actually 1us. */
4226 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
4227 		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
4228 
4229 	/* DVFS post sequence would be here. See the comment above. */
4230 
4231 	val = intel_de_read(dev_priv, enable_reg);
4232 	val &= ~PLL_POWER_ENABLE;
4233 	intel_de_write(dev_priv, enable_reg, val);
4234 
4235 	/*
4236 	 * The spec says we need to "wait" but it also says it should be
4237 	 * immediate.
4238 	 */
4239 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4240 		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
4241 			pll->info->id);
4242 }
4243 
4244 static void combo_pll_disable(struct drm_i915_private *dev_priv,
4245 			      struct intel_shared_dpll *pll)
4246 {
4247 	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
4248 
4249 	icl_pll_disable(dev_priv, pll, enable_reg);
4250 
4251 	if (IS_JSL_EHL(dev_priv) &&
4252 	    pll->info->id == DPLL_ID_EHL_DPLL4)
4253 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4254 					pll->wakeref);
4255 }
4256 
4257 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
4258 			    struct intel_shared_dpll *pll)
4259 {
4260 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
4261 }
4262 
/* Disable a Type-C (MG/Dekel) PHY PLL. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll,
			intel_tc_pll_enable_reg(dev_priv, pll));
}
4270 
/* Cache the DPLL reference clock: ICL+ uses the cdclk reference clock. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4276 
4277 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4278 			      const struct intel_dpll_hw_state *hw_state)
4279 {
4280 	drm_dbg_kms(&dev_priv->drm,
4281 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4282 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4283 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4284 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4285 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4286 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4287 		    hw_state->cfgcr0, hw_state->cfgcr1,
4288 		    hw_state->mg_refclkin_ctl,
4289 		    hw_state->mg_clktop2_coreclkctl1,
4290 		    hw_state->mg_clktop2_hsclkctl,
4291 		    hw_state->mg_pll_div0,
4292 		    hw_state->mg_pll_div1,
4293 		    hw_state->mg_pll_lf,
4294 		    hw_state->mg_pll_frac_lock,
4295 		    hw_state->mg_pll_ssc,
4296 		    hw_state->mg_pll_bias,
4297 		    hw_state->mg_pll_tdc_coldst_bias);
4298 }
4299 
/* Hooks for combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Hooks for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Hooks for ICL MG PHY PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4320 
/*
 * Ice Lake PLLs. Each entry's position in the table must match its DPLL
 * id; intel_shared_dpll_init() warns otherwise.
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* Elkhart Lake / Jasper Lake PLLs: combo PLLs only, no Type-C/TBT. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4355 
/* Hooks for display 12+ Dekel PHY PLLs; shares enable/disable with MG. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};

/* Tiger Lake PLLs: two combo PLLs, TBT PLL and six Type-C PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4384 
/* Rocket Lake PLLs: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* DG1 PLLs: four combo PLLs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4415 
/* Alder Lake S PLLs: combo PLLs only. */
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

/* Alder Lake P PLLs: combo, TBT and four Type-C PLLs. */
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4451 
4452 /**
4453  * intel_shared_dpll_init - Initialize shared DPLLs
4454  * @dev: drm device
4455  *
4456  * Initialize shared DPLLs for @dev.
4457  */
4458 void intel_shared_dpll_init(struct drm_device *dev)
4459 {
4460 	struct drm_i915_private *dev_priv = to_i915(dev);
4461 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4462 	const struct dpll_info *dpll_info;
4463 	int i;
4464 
4465 	if (IS_ALDERLAKE_P(dev_priv))
4466 		dpll_mgr = &adlp_pll_mgr;
4467 	else if (IS_ALDERLAKE_S(dev_priv))
4468 		dpll_mgr = &adls_pll_mgr;
4469 	else if (IS_DG1(dev_priv))
4470 		dpll_mgr = &dg1_pll_mgr;
4471 	else if (IS_ROCKETLAKE(dev_priv))
4472 		dpll_mgr = &rkl_pll_mgr;
4473 	else if (DISPLAY_VER(dev_priv) >= 12)
4474 		dpll_mgr = &tgl_pll_mgr;
4475 	else if (IS_JSL_EHL(dev_priv))
4476 		dpll_mgr = &ehl_pll_mgr;
4477 	else if (DISPLAY_VER(dev_priv) >= 11)
4478 		dpll_mgr = &icl_pll_mgr;
4479 	else if (IS_CANNONLAKE(dev_priv))
4480 		dpll_mgr = &cnl_pll_mgr;
4481 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4482 		dpll_mgr = &bxt_pll_mgr;
4483 	else if (DISPLAY_VER(dev_priv) == 9)
4484 		dpll_mgr = &skl_pll_mgr;
4485 	else if (HAS_DDI(dev_priv))
4486 		dpll_mgr = &hsw_pll_mgr;
4487 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4488 		dpll_mgr = &pch_pll_mgr;
4489 
4490 	if (!dpll_mgr) {
4491 		dev_priv->dpll.num_shared_dpll = 0;
4492 		return;
4493 	}
4494 
4495 	dpll_info = dpll_mgr->dpll_info;
4496 
4497 	for (i = 0; dpll_info[i].name; i++) {
4498 		drm_WARN_ON(dev, i != dpll_info[i].id);
4499 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4500 	}
4501 
4502 	dev_priv->dpll.mgr = dpll_mgr;
4503 	dev_priv->dpll.num_shared_dpll = i;
4504 	mutex_init(&dev_priv->dpll.lock);
4505 
4506 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4507 }
4508 
4509 /**
4510  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4511  * @state: atomic state
4512  * @crtc: CRTC to reserve DPLLs for
4513  * @encoder: encoder
4514  *
4515  * This function reserves all required DPLLs for the given CRTC and encoder
4516  * combination in the current atomic commit @state and the new @crtc atomic
4517  * state.
4518  *
4519  * The new configuration in the atomic commit @state is made effective by
4520  * calling intel_shared_dpll_swap_state().
4521  *
4522  * The reserved DPLLs should be released by calling
4523  * intel_release_shared_dplls().
4524  *
4525  * Returns:
4526  * True if all required DPLLs were successfully reserved.
4527  */
4528 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4529 				struct intel_crtc *crtc,
4530 				struct intel_encoder *encoder)
4531 {
4532 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4533 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4534 
4535 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4536 		return false;
4537 
4538 	return dpll_mgr->get_dplls(state, crtc, encoder);
4539 }
4540 
4541 /**
4542  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4543  * @state: atomic state
4544  * @crtc: crtc from which the DPLLs are to be released
4545  *
4546  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4547  * from the current atomic commit @state and the old @crtc atomic state.
4548  *
4549  * The new configuration in the atomic commit @state is made effective by
4550  * calling intel_shared_dpll_swap_state().
4551  */
4552 void intel_release_shared_dplls(struct intel_atomic_state *state,
4553 				struct intel_crtc *crtc)
4554 {
4555 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4556 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4557 
4558 	/*
4559 	 * FIXME: this function is called for every platform having a
4560 	 * compute_clock hook, even though the platform doesn't yet support
4561 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4562 	 * called on those.
4563 	 */
4564 	if (!dpll_mgr)
4565 		return;
4566 
4567 	dpll_mgr->put_dplls(state, crtc);
4568 }
4569 
4570 /**
4571  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4572  * @state: atomic state
4573  * @crtc: the CRTC for which to update the active DPLL
4574  * @encoder: encoder determining the type of port DPLL
4575  *
4576  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4577  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4578  * DPLL selected will be based on the current mode of the encoder's port.
4579  */
4580 void intel_update_active_dpll(struct intel_atomic_state *state,
4581 			      struct intel_crtc *crtc,
4582 			      struct intel_encoder *encoder)
4583 {
4584 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4585 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4586 
4587 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4588 		return;
4589 
4590 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4591 }
4592 
4593 /**
4594  * intel_dpll_get_freq - calculate the DPLL's output frequency
4595  * @i915: i915 device
4596  * @pll: DPLL for which to calculate the output frequency
4597  * @pll_state: DPLL state from which to calculate the output frequency
4598  *
4599  * Return the output frequency corresponding to @pll's passed in @pll_state.
4600  */
4601 int intel_dpll_get_freq(struct drm_i915_private *i915,
4602 			const struct intel_shared_dpll *pll,
4603 			const struct intel_dpll_hw_state *pll_state)
4604 {
4605 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4606 		return 0;
4607 
4608 	return pll->info->funcs->get_freq(i915, pll, pll_state);
4609 }
4610 
4611 /**
4612  * intel_dpll_get_hw_state - readout the DPLL's hardware state
4613  * @i915: i915 device
4614  * @pll: DPLL for which to calculate the output frequency
4615  * @hw_state: DPLL's hardware state
4616  *
4617  * Read out @pll's hardware state into @hw_state.
4618  */
4619 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4620 			     struct intel_shared_dpll *pll,
4621 			     struct intel_dpll_hw_state *hw_state)
4622 {
4623 	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4624 }
4625 
4626 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4627 				  struct intel_shared_dpll *pll)
4628 {
4629 	struct intel_crtc *crtc;
4630 
4631 	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4632 
4633 	if (IS_JSL_EHL(i915) && pll->on &&
4634 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4635 		pll->wakeref = intel_display_power_get(i915,
4636 						       POWER_DOMAIN_DPLL_DC_OFF);
4637 	}
4638 
4639 	pll->state.pipe_mask = 0;
4640 	for_each_intel_crtc(&i915->drm, crtc) {
4641 		struct intel_crtc_state *crtc_state =
4642 			to_intel_crtc_state(crtc->base.state);
4643 
4644 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4645 			pll->state.pipe_mask |= BIT(crtc->pipe);
4646 	}
4647 	pll->active_mask = pll->state.pipe_mask;
4648 
4649 	drm_dbg_kms(&i915->drm,
4650 		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
4651 		    pll->info->name, pll->state.pipe_mask, pll->on);
4652 }
4653 
4654 void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4655 {
4656 	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4657 		i915->dpll.mgr->update_ref_clks(i915);
4658 }
4659 
4660 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4661 {
4662 	int i;
4663 
4664 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4665 		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4666 }
4667 
4668 static void sanitize_dpll_state(struct drm_i915_private *i915,
4669 				struct intel_shared_dpll *pll)
4670 {
4671 	if (!pll->on || pll->active_mask)
4672 		return;
4673 
4674 	drm_dbg_kms(&i915->drm,
4675 		    "%s enabled but not in use, disabling\n",
4676 		    pll->info->name);
4677 
4678 	pll->info->funcs->disable(i915, pll);
4679 	pll->on = false;
4680 }
4681 
4682 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4683 {
4684 	int i;
4685 
4686 	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4687 		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4688 }
4689 
4690 /**
4691  * intel_dpll_dump_hw_state - write hw_state to dmesg
4692  * @dev_priv: i915 drm device
4693  * @hw_state: hw state to be written to the log
4694  *
4695  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4696  */
4697 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4698 			      const struct intel_dpll_hw_state *hw_state)
4699 {
4700 	if (dev_priv->dpll.mgr) {
4701 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4702 	} else {
4703 		/* fallback for platforms that don't use the shared dpll
4704 		 * infrastructure
4705 		 */
4706 		drm_dbg_kms(&dev_priv->drm,
4707 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4708 			    "fp0: 0x%x, fp1: 0x%x\n",
4709 			    hw_state->dpll,
4710 			    hw_state->dpll_md,
4711 			    hw_state->fp0,
4712 			    hw_state->fp1);
4713 	}
4714 }
4715