1 /*
2  * Copyright © 2006-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
27 
28 /**
29  * DOC: Display PLLs
30  *
31  * Display PLLs used for driving outputs vary by platform. While some have
32  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33  * from a pool. In the latter scenario, it is possible that multiple pipes
34  * share a PLL if their configurations match.
35  *
36  * This file provides an abstraction over display PLLs. The function
37  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
38  * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, required PLLs can be reserved
40  * for a given CRTC and encoder configuration by calling
41  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42  * with intel_release_shared_dplls().
43  * Changes to the users are first staged in the atomic state, and then made
44  * effective by calling intel_shared_dpll_swap_state() during the atomic
45  * commit phase.
46  */
47 
/*
 * Platform-specific hooks for managing the pool of shared DPLLs.
 * Each supported platform provides one static instance of this vtable.
 */
struct intel_dpll_mgr {
	/* Table of PLLs available on this platform, terminated by an empty entry. */
	const struct dpll_info *dpll_info;

	/* Reserve the PLL(s) needed by @crtc/@encoder in the atomic @state. */
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	/* Release the PLL(s) previously reserved for @crtc. */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* Optional: update which reserved PLL is actively used by @crtc. */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* Optional: refresh the cached reference clock frequencies. */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* Dump @hw_state to the kernel log for debugging. */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
63 
64 static void
65 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
66 				  struct intel_shared_dpll_state *shared_dpll)
67 {
68 	enum intel_dpll_id i;
69 
70 	/* Copy shared dpll state */
71 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
72 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
73 
74 		shared_dpll[i] = pll->state;
75 	}
76 }
77 
/*
 * Return the atomic state's staged copy of all shared DPLL states,
 * lazily duplicating the live PLL state on first use. The modeset
 * connection_mutex must be held.
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		/* First access: snapshot the current state to stage against. */
		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}
94 
95 /**
96  * intel_get_shared_dpll_by_id - get a DPLL given its id
97  * @dev_priv: i915 device instance
98  * @id: pll id
99  *
100  * Returns:
101  * A pointer to the DPLL with @id
102  */
103 struct intel_shared_dpll *
104 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
105 			    enum intel_dpll_id id)
106 {
107 	return &dev_priv->dpll.shared_dplls[id];
108 }
109 
110 /**
111  * intel_get_shared_dpll_id - get the id of a DPLL
112  * @dev_priv: i915 device instance
113  * @pll: the DPLL
114  *
115  * Returns:
116  * The id of @pll
117  */
118 enum intel_dpll_id
119 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
120 			 struct intel_shared_dpll *pll)
121 {
122 	long pll_idx = pll - dev_priv->dpll.shared_dplls;
123 
124 	if (drm_WARN_ON(&dev_priv->drm,
125 			pll_idx < 0 ||
126 			pll_idx >= dev_priv->dpll.num_shared_dpll))
127 		return -1;
128 
129 	return pll_idx;
130 }
131 
/* For ILK+ */
/*
 * Verify that @pll's enable state as read back from the hardware matches
 * the expected @state; emits a state-checker warning on mismatch.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
}
149 
150 /**
151  * intel_prepare_shared_dpll - call a dpll's prepare hook
152  * @crtc_state: CRTC, and its state, which has a shared dpll
153  *
154  * This calls the PLL's prepare hook if it has one and if the PLL is not
155  * already enabled. The prepare hook is platform specific.
156  */
157 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
158 {
159 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
160 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
161 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
162 
163 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
164 		return;
165 
166 	mutex_lock(&dev_priv->dpll.lock);
167 	drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
168 	if (!pll->active_mask) {
169 		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
170 		drm_WARN_ON(&dev_priv->drm, pll->on);
171 		assert_shared_dpll_disabled(dev_priv, pll);
172 
173 		pll->info->funcs->prepare(dev_priv, pll);
174 	}
175 	mutex_unlock(&dev_priv->dpll.lock);
176 }
177 
178 /**
179  * intel_enable_shared_dpll - enable a CRTC's shared DPLL
180  * @crtc_state: CRTC, and its state, which has a shared DPLL
181  *
182  * Enable the shared DPLL used by @crtc.
183  */
184 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
185 {
186 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
187 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
188 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
189 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
190 	unsigned int old_mask;
191 
192 	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
193 		return;
194 
195 	mutex_lock(&dev_priv->dpll.lock);
196 	old_mask = pll->active_mask;
197 
198 	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
199 	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
200 		goto out;
201 
202 	pll->active_mask |= crtc_mask;
203 
204 	drm_dbg_kms(&dev_priv->drm,
205 		    "enable %s (active %x, on? %d) for crtc %d\n",
206 		    pll->info->name, pll->active_mask, pll->on,
207 		    crtc->base.base.id);
208 
209 	if (old_mask) {
210 		drm_WARN_ON(&dev_priv->drm, !pll->on);
211 		assert_shared_dpll_enabled(dev_priv, pll);
212 		goto out;
213 	}
214 	drm_WARN_ON(&dev_priv->drm, pll->on);
215 
216 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
217 	pll->info->funcs->enable(dev_priv, pll);
218 	pll->on = true;
219 
220 out:
221 	mutex_unlock(&dev_priv->dpll.lock);
222 }
223 
224 /**
225  * intel_disable_shared_dpll - disable a CRTC's shared DPLL
226  * @crtc_state: CRTC, and its state, which has a shared DPLL
227  *
228  * Disable the shared DPLL used by @crtc.
229  */
230 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
231 {
232 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
233 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
234 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
235 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
236 
237 	/* PCH only available on ILK+ */
238 	if (INTEL_GEN(dev_priv) < 5)
239 		return;
240 
241 	if (pll == NULL)
242 		return;
243 
244 	mutex_lock(&dev_priv->dpll.lock);
245 	if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
246 		goto out;
247 
248 	drm_dbg_kms(&dev_priv->drm,
249 		    "disable %s (active %x, on? %d) for crtc %d\n",
250 		    pll->info->name, pll->active_mask, pll->on,
251 		    crtc->base.base.id);
252 
253 	assert_shared_dpll_enabled(dev_priv, pll);
254 	drm_WARN_ON(&dev_priv->drm, !pll->on);
255 
256 	pll->active_mask &= ~crtc_mask;
257 	if (pll->active_mask)
258 		goto out;
259 
260 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
261 	pll->info->funcs->disable(dev_priv, pll);
262 	pll->on = false;
263 
264 out:
265 	mutex_unlock(&dev_priv->dpll.lock);
266 }
267 
/*
 * Find a shared DPLL, limited to the set in @dpll_mask, usable with
 * @pll_state: either one whose staged hw state matches exactly (so it
 * can be shared with its current users) or, failing that, a currently
 * unused one. Returns NULL if neither exists.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
		       unsigned long dpll_mask)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);

	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
		pll = &dev_priv->dpll.shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].crtc_mask == 0) {
			/* Remember the first free PLL as a fallback. */
			if (!unused_pll)
				unused_pll = pll;
			continue;
		}

		if (memcmp(pll_state,
			   &shared_dpll[i].hw_state,
			   sizeof(*pll_state)) == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
				    shared_dpll[i].crtc_mask,
				    pll->active_mask);
			return pll;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	if (unused_pll) {
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
		return unused_pll;
	}

	return NULL;
}
316 
317 static void
318 intel_reference_shared_dpll(struct intel_atomic_state *state,
319 			    const struct intel_crtc *crtc,
320 			    const struct intel_shared_dpll *pll,
321 			    const struct intel_dpll_hw_state *pll_state)
322 {
323 	struct drm_i915_private *i915 = to_i915(state->base.dev);
324 	struct intel_shared_dpll_state *shared_dpll;
325 	const enum intel_dpll_id id = pll->info->id;
326 
327 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
328 
329 	if (shared_dpll[id].crtc_mask == 0)
330 		shared_dpll[id].hw_state = *pll_state;
331 
332 	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
333 		pipe_name(crtc->pipe));
334 
335 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
336 }
337 
338 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
339 					  const struct intel_crtc *crtc,
340 					  const struct intel_shared_dpll *pll)
341 {
342 	struct intel_shared_dpll_state *shared_dpll;
343 
344 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
345 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
346 }
347 
348 static void intel_put_dpll(struct intel_atomic_state *state,
349 			   struct intel_crtc *crtc)
350 {
351 	const struct intel_crtc_state *old_crtc_state =
352 		intel_atomic_get_old_crtc_state(state, crtc);
353 	struct intel_crtc_state *new_crtc_state =
354 		intel_atomic_get_new_crtc_state(state, crtc);
355 
356 	new_crtc_state->shared_dpll = NULL;
357 
358 	if (!old_crtc_state->shared_dpll)
359 		return;
360 
361 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
362 }
363 
364 /**
365  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
366  * @state: atomic state
367  *
368  * This is the dpll version of drm_atomic_helper_swap_state() since the
369  * helper does not handle driver-specific global state.
370  *
371  * For consistency with atomic helpers this function does a complete swap,
372  * i.e. it also puts the current state into @state, even though there is no
373  * need for that at this moment.
374  */
375 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
376 {
377 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
378 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
379 	enum intel_dpll_id i;
380 
381 	if (!state->dpll_set)
382 		return;
383 
384 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
385 		struct intel_shared_dpll *pll =
386 			&dev_priv->dpll.shared_dplls[i];
387 
388 		swap(pll->state, shared_dpll[i]);
389 	}
390 }
391 
/*
 * Read back the PCH DPLL registers (DPLL, FP0, FP1) into @hw_state.
 * Returns true if the PLL's VCO is enabled; returns false without
 * reading if the display core power domain cannot be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & DPLL_VCO_ENABLE;
}
414 
/* Program the PLL's FP0/FP1 register values before the DPLL itself is enabled. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
423 
/*
 * State-checker helper: warn unless the PCH reference clock (DREF) has
 * at least one source enabled. Only meaningful on IBX/CPT PCHs.
 */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
436 
/* Enable the PCH DPLL and wait for its clocks to stabilize. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
460 
/* Disable the PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}
470 
/*
 * Reserve a PCH DPLL for @crtc: IBX uses a fixed PLL-to-pipe mapping,
 * other PCHs pick a matching or free PLL from the pool.
 */
static bool ibx_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->dpll.shared_dplls[i];

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
	} else {
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
	}

	if (!pll)
		return false;

	/* reference the pll */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
508 
/* Log the IBX PLL register values captured in @hw_state. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}
520 
/* PLL vtable shared by both PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

/* The two PCH DPLLs, terminated by an empty sentinel entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH (IBX/CPT-style) platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};
540 
541 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
542 			       struct intel_shared_dpll *pll)
543 {
544 	const enum intel_dpll_id id = pll->info->id;
545 
546 	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
547 	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
548 	udelay(20);
549 }
550 
551 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
552 				struct intel_shared_dpll *pll)
553 {
554 	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
555 	intel_de_posting_read(dev_priv, SPLL_CTL);
556 	udelay(20);
557 }
558 
/* Disable a WRPLL; may re-init the PCH refclk once it is no longer needed. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
576 
/* Disable the SPLL; may re-init the PCH refclk once it is no longer needed. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, SPLL_CTL);
	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
594 
/*
 * Read back the WRPLL control register into @hw_state. Returns true if
 * the PLL is enabled; returns false without reading if the display core
 * power domain cannot be acquired.
 */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
615 
/*
 * Read back the SPLL control register into @hw_state. Returns true if
 * the PLL is enabled; returns false without reading if the display core
 * power domain cannot be acquired.
 */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
635 
/* LC PLL reference frequency in MHz (see hsw_ddi_wrpll_get_freq()). */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* Search range for the post divider p; even values only. */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* One candidate divider triple considered by the WRPLL search. */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
652 
/*
 * Per-clock PPM budget used when picking WRPLL dividers: how far the
 * synthesized clock may deviate from the requested one.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	/* Known pixel clocks (Hz) with a non-default deviation budget. */
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{  25175000,    0 }, {  25200000,    0 }, {  27000000,    0 },
		{  27027000,    0 }, {  37762500,    0 }, {  37800000,    0 },
		{  40500000,    0 }, {  40541000,    0 }, {  54000000,    0 },
		{  54054000,    0 }, {  59341000,    0 }, {  59400000,    0 },
		{  72000000,    0 }, {  74176000,    0 }, {  74250000,    0 },
		{  81000000,    0 }, {  81081000,    0 }, {  89012000,    0 },
		{  89100000,    0 }, { 108000000,    0 }, { 108108000,    0 },
		{ 111264000,    0 }, { 111375000,    0 }, { 148352000,    0 },
		{ 148500000,    0 }, { 162000000,    0 }, { 162162000,    0 },
		{ 222525000,    0 }, { 222750000,    0 }, { 296703000,    0 },
		{ 297000000,    0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	/* Any clock not listed above gets the default budget. */
	return 1000;
}
726 
/*
 * Evaluate divider candidate (r2, n2, p) against the target @freq2k and
 * replace @best if the candidate is preferable under the PPM @budget
 * rules explained in the comment below.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
786 
/*
 * Compute WRPLL dividers (r2 = 2*R, n2 = 2*N, p) for @clock, scanning
 * every (r2, n2) pair that keeps the reference and VCO frequencies in
 * range and letting hsw_wrpll_update_rnp() track the best candidate.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
851 
852 static struct intel_shared_dpll *
853 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
854 		       struct intel_crtc *crtc)
855 {
856 	struct intel_crtc_state *crtc_state =
857 		intel_atomic_get_new_crtc_state(state, crtc);
858 	struct intel_shared_dpll *pll;
859 	u32 val;
860 	unsigned int p, n2, r2;
861 
862 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
863 
864 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
865 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
866 	      WRPLL_DIVIDER_POST(p);
867 
868 	crtc_state->dpll_hw_state.wrpll = val;
869 
870 	pll = intel_find_shared_dpll(state, crtc,
871 				     &crtc_state->dpll_hw_state,
872 				     BIT(DPLL_ID_WRPLL2) |
873 				     BIT(DPLL_ID_WRPLL1));
874 
875 	if (!pll)
876 		return NULL;
877 
878 	return pll;
879 }
880 
/*
 * Compute the WRPLL output frequency (kHz) by decoding the reference
 * source and the R/P/N dividers from the cached control register value.
 */
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
				  const struct intel_shared_dpll *pll)
{
	int refclk;
	int n, p, r;
	u32 wrpll = pll->state.hw_state.wrpll;

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
			refclk = dev_priv->dpll.ref_clks.nssc;
			break;
		}
		/* fall through */
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
		refclk = dev_priv->dpll.ref_clks.ssc;
		break;
	case WRPLL_REF_LCPLL:
		refclk = 2700000;
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
	return (refclk * n / 10) / (p * r) * 2;
}
919 
920 static struct intel_shared_dpll *
921 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
922 {
923 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
924 	struct intel_shared_dpll *pll;
925 	enum intel_dpll_id pll_id;
926 	int clock = crtc_state->port_clock;
927 
928 	switch (clock / 2) {
929 	case 81000:
930 		pll_id = DPLL_ID_LCPLL_810;
931 		break;
932 	case 135000:
933 		pll_id = DPLL_ID_LCPLL_1350;
934 		break;
935 	case 270000:
936 		pll_id = DPLL_ID_LCPLL_2700;
937 		break;
938 	default:
939 		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
940 			    clock);
941 		return NULL;
942 	}
943 
944 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
945 
946 	if (!pll)
947 		return NULL;
948 
949 	return pll;
950 }
951 
952 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
953 				  const struct intel_shared_dpll *pll)
954 {
955 	int link_clock = 0;
956 
957 	switch (pll->info->id) {
958 	case DPLL_ID_LCPLL_810:
959 		link_clock = 81000;
960 		break;
961 	case DPLL_ID_LCPLL_1350:
962 		link_clock = 135000;
963 		break;
964 	case DPLL_ID_LCPLL_2700:
965 		link_clock = 270000;
966 		break;
967 	default:
968 		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
969 		break;
970 	}
971 
972 	return link_clock * 2;
973 }
974 
975 static struct intel_shared_dpll *
976 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
977 		      struct intel_crtc *crtc)
978 {
979 	struct intel_crtc_state *crtc_state =
980 		intel_atomic_get_new_crtc_state(state, crtc);
981 
982 	if (WARN_ON(crtc_state->port_clock / 2 != 135000))
983 		return NULL;
984 
985 	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
986 					 SPLL_REF_MUXED_SSC;
987 
988 	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
989 				      BIT(DPLL_ID_SPLL));
990 }
991 
992 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
993 				 const struct intel_shared_dpll *pll)
994 {
995 	int link_clock = 0;
996 
997 	switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
998 	case SPLL_FREQ_810MHz:
999 		link_clock = 81000;
1000 		break;
1001 	case SPLL_FREQ_1350MHz:
1002 		link_clock = 135000;
1003 		break;
1004 	case SPLL_FREQ_2700MHz:
1005 		link_clock = 270000;
1006 		break;
1007 	default:
1008 		drm_WARN(&i915->drm, 1, "bad spll freq\n");
1009 		break;
1010 	}
1011 
1012 	return link_clock * 2;
1013 }
1014 
/*
 * Reserve the DPLL needed by @crtc on HSW/BDW: a WRPLL for HDMI, an
 * LCPLL for DP, or the SPLL for analog. Returns true and stores the PLL
 * in the crtc state on success.
 */
static bool hsw_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* Start from a clean hw state; the per-output helpers fill it in. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
	else if (intel_crtc_has_dp_encoder(crtc_state))
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);
	else
		return false;

	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1045 
1046 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1047 {
1048 	i915->dpll.ref_clks.ssc = 135000;
1049 	/* Non-SSC is only used on non-ULT HSW. */
1050 	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1051 		i915->dpll.ref_clks.nssc = 24000;
1052 	else
1053 		i915->dpll.ref_clks.nssc = 135000;
1054 }
1055 
/* Log the cached HSW WRPLL/SPLL register values for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}
1062 
/* PLL operations for the HSW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};
1069 
/* PLL operations for the HSW SPLL (analog/CRT output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};
1076 
/*
 * The HSW LCPLLs are registered with INTEL_DPLL_ALWAYS_ON (see hsw_plls),
 * so there is nothing to do on enable; this is bookkeeping only.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}
1081 
/* Counterpart to hsw_ddi_lcpll_enable(): always-on PLL, nothing to do. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1086 
/*
 * Report the LCPLL as always enabled; no register state is read back
 * (@hw_state is left untouched).
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
1093 
/* PLL operations for the fixed-frequency HSW LCPLLs (no-op enable/disable). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};
1100 
/* All HSW display PLLs; the fixed-rate LCPLLs are flagged always-on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};
1110 
/* HSW/BDW shared-DPLL manager. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};
1118 
/* Per-PLL register set for SKL: control plus the two HDMI config registers. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
1122 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
		/* cfgcr1/cfgcr2 left zero-initialized on purpose */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
1149 
/*
 * Program this PLL's slice of the shared DPLL_CTRL1 register.
 *
 * Each PLL owns a 6-bit field group (HDMI mode, SSC, link rate) in
 * DPLL_CTRL1; the cached ctrl1 value is stored unshifted, hence the
 * "<< (id * 6)" when merging it in.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* clear only this PLL's fields before OR-ing in the new value */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
1166 
/*
 * Enable a SKL DPLL (1-3): program CTRL1 and the config registers, then
 * set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	/* config must be written (and posted) before enabling the PLL */
	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	/* wait up to 5 ms for the PLL to lock */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
1187 
/*
 * DPLL0 is already running (it drives CDCLK); only its DPLL_CTRL1 fields
 * need (re)programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1193 
/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
1205 
/* DPLL0 also drives CDCLK and is never turned off here; nothing to do. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1210 
/*
 * Read back the hardware state of a SKL DPLL (1-3) into @hw_state.
 *
 * Returns false if display power is down or the PLL is disabled. Takes a
 * display-core power reference for the duration of the register reads.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's 6-bit field group from the shared register */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1247 
/*
 * Read back DPLL0's hardware state. Only the DPLL_CTRL1 fields are read:
 * DPLL0 has no cfgcr registers (it doesn't support HDMI mode).
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1280 
/* Running best candidate while searching for WRPLL dividers. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1287 
1288 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1289 {
1290 	memset(ctx, 0, sizeof(*ctx));
1291 
1292 	ctx->min_deviation = U64_MAX;
1293 }
1294 
1295 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1296 #define SKL_DCO_MAX_PDEVIATION	100
1297 #define SKL_DCO_MAX_NDEVIATION	600
1298 
1299 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1300 				  u64 central_freq,
1301 				  u64 dco_freq,
1302 				  unsigned int divider)
1303 {
1304 	u64 deviation;
1305 
1306 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1307 			      central_freq);
1308 
1309 	/* positive deviation */
1310 	if (dco_freq >= central_freq) {
1311 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1312 		    deviation < ctx->min_deviation) {
1313 			ctx->min_deviation = deviation;
1314 			ctx->central_freq = central_freq;
1315 			ctx->dco_freq = dco_freq;
1316 			ctx->p = divider;
1317 		}
1318 	/* negative deviation */
1319 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1320 		   deviation < ctx->min_deviation) {
1321 		ctx->min_deviation = deviation;
1322 		ctx->central_freq = central_freq;
1323 		ctx->dco_freq = dco_freq;
1324 		ctx->p = divider;
1325 	}
1326 }
1327 
/*
 * Split divider @p into the three hardware multipliers P0 (PDiv),
 * P1 (QDiv ratio) and P2 (KDiv). Outputs are left untouched for values
 * of @p outside the supported divider lists.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		unsigned int half = p / 2;

		/* small halves map directly onto P2 */
		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* otherwise factor the half, preferring 2, then 3, then 7 */
		if (half % 2 == 0) {
			*p0 = 2;
			*p1 = half / 2;
			*p2 = 2;
		} else if (half % 3 == 0) {
			*p0 = 3;
			*p1 = half / 3;
			*p2 = 2;
		} else if (half % 7 == 0) {
			*p0 = 7;
			*p1 = half / 7;
			*p2 = 2;
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
1376 
/* Raw field values destined for the SKL DPLL_CFGCR1/CFGCR2 registers. */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
1386 
/*
 * Encode the chosen dividers and DCO frequency into the register field
 * values stored in @params.
 *
 * @afe_clock: AFE (5x pixel) clock in Hz
 * @ref_clock: reference clock in kHz
 * @central_freq: chosen DCO central frequency in Hz
 * @p0/@p1/@p2: PDiv, QDiv ratio and KDiv dividers
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* encode the central frequency selection */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
		/* last case: falls out of the switch */
	}

	/* encode PDiv */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* encode KDiv */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* QDiv mode is only enabled for ratios other than 1 */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* fractional part in units of 1/0x8000 MHz (15-bit fixed point) */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1454 
1455 static bool
1456 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1457 			int ref_clock,
1458 			struct skl_wrpll_params *wrpll_params)
1459 {
1460 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1461 	u64 dco_central_freq[3] = { 8400000000ULL,
1462 				    9000000000ULL,
1463 				    9600000000ULL };
1464 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1465 					     24, 28, 30, 32, 36, 40, 42, 44,
1466 					     48, 52, 54, 56, 60, 64, 66, 68,
1467 					     70, 72, 76, 78, 80, 84, 88, 90,
1468 					     92, 96, 98 };
1469 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1470 	static const struct {
1471 		const int *list;
1472 		int n_dividers;
1473 	} dividers[] = {
1474 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1475 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1476 	};
1477 	struct skl_wrpll_context ctx;
1478 	unsigned int dco, d, i;
1479 	unsigned int p0, p1, p2;
1480 
1481 	skl_wrpll_context_init(&ctx);
1482 
1483 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1484 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1485 			for (i = 0; i < dividers[d].n_dividers; i++) {
1486 				unsigned int p = dividers[d].list[i];
1487 				u64 dco_freq = p * afe_clock;
1488 
1489 				skl_wrpll_try_divider(&ctx,
1490 						      dco_central_freq[dco],
1491 						      dco_freq,
1492 						      p);
1493 				/*
1494 				 * Skip the remaining dividers if we're sure to
1495 				 * have found the definitive divider, we can't
1496 				 * improve a 0 deviation.
1497 				 */
1498 				if (ctx.min_deviation == 0)
1499 					goto skip_remaining_dividers;
1500 			}
1501 		}
1502 
1503 skip_remaining_dividers:
1504 		/*
1505 		 * If a solution is found with an even divider, prefer
1506 		 * this one.
1507 		 */
1508 		if (d == 0 && ctx.p)
1509 			break;
1510 	}
1511 
1512 	if (!ctx.p) {
1513 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1514 		return false;
1515 	}
1516 
1517 	/*
1518 	 * gcc incorrectly analyses that these can be used without being
1519 	 * initialized. To be fair, it's hard to guess.
1520 	 */
1521 	p0 = p1 = p2 = 0;
1522 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1523 	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
1524 				  ctx.central_freq, p0, p1, p2);
1525 
1526 	return true;
1527 }
1528 
/*
 * Compute the ctrl1/cfgcr1/cfgcr2 values for an HDMI output and store
 * them in the crtc state. Returns false if no WRPLL dividers exist for
 * the requested port clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz, skl_ddi_calculate_wrpll() wants Hz */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1566 
/*
 * Reverse skl_ddi_calculate_wrpll(): compute the pixel clock (in kHz)
 * from the cached cfgcr1/cfgcr2 register values.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* QDiv ratio only applies when QDiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;


	/* decode PDiv field into the actual divider */
	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	}

	/* decode KDiv field into the actual divider */
	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	}

	/* DCO frequency = (integer + fraction/0x8000) * ref, in kHz */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* AFE clock is 5x pixel clock, hence the extra /5 */
	return dco_freq / (p0 * p1 * p2 * 5);
}
1624 
/*
 * Fill the crtc state's ctrl1 value for a DP/eDP output from the link
 * rate (port_clock / 2). Always returns true; unknown rates leave only
 * the override bit set.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
1664 
/*
 * Decode the DP link clock (in kHz) from the cached ctrl1 link-rate
 * field and return the port clock (2x link clock).
 */
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	int link_clock = 0;

	/* ctrl1 is cached unshifted, so extract with the id-0 mask/shift */
	switch ((pll->state.hw_state.ctrl1 &
		 DPLL_CTRL1_LINK_RATE_MASK(0)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
	case DPLL_CTRL1_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
1698 
/*
 * Compute the PLL state for a SKL CRTC and reserve a matching PLL.
 *
 * eDP is restricted to DPLL0 (shared with CDCLK); all other outputs pick
 * from DPLL1-3. Returns false if the output type is unsupported, the
 * dividers can't be computed, or no PLL is available.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	/* track this CRTC as a user of the chosen PLL */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1747 
1748 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1749 				const struct intel_shared_dpll *pll)
1750 {
1751 	/*
1752 	 * ctrl1 register is already shifted for each pll, just use 0 to get
1753 	 * the internal shift for each field
1754 	 */
1755 	if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1756 		return skl_ddi_wrpll_get_freq(i915, pll);
1757 	else
1758 		return skl_ddi_lcpll_get_freq(i915, pll);
1759 }
1760 
/* Cache the SKL DPLL reference clock: same as the CDCLK reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
1766 
/* Log the cached SKL PLL register values for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1776 
/* PLL operations for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1783 
/* PLL operations for SKL DPLL0, which also drives CDCLK. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
1790 
/* All SKL display PLLs; DPLL0 is always-on (it drives CDCLK). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
1798 
/* SKL shared-DPLL manager. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
1806 
/*
 * Bring up a BXT/GLK port PLL: program the PHY PLL registers from the
 * cached hw state, enable the PLL, wait for lock, then configure lane
 * staggering. The register write order follows the hardware enable
 * sequence and must not be changed.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK additionally requires PLL power before enabling */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* second EBB_4 write applies the cached 10-bit-clock setting */
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1927 
/*
 * Disable a BXT/GLK port PLL; on GLK also drop the PLL power and wait
 * for the power state to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
1950 
/*
 * Read back the hardware state of a BXT/GLK port PLL into @hw_state,
 * masking each value down to the fields this driver programs. Returns
 * false if display power is down or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2030 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* link/port clock in kHz */
	u32 p1;
	u32 p2;
	u32 m2_int;		/* integer part of M2 */
	u32 m2_frac;		/* fractional part of M2 (22-bit) */
	bool m2_frac_en;
	u32 n;

	int vco;		/* resulting VCO frequency */
};
2043 
/* pre-calculated values for DP linkrates */
/* fields: clock, p1, p2, m2_int, m2_frac, m2_frac_en, n (vco computed later) */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
2054 
2055 static bool
2056 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2057 			  struct bxt_clk_div *clk_div)
2058 {
2059 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2060 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2061 	struct dpll best_clock;
2062 
2063 	/* Calculate HDMI div */
2064 	/*
2065 	 * FIXME: tie the following calculation into
2066 	 * i9xx_crtc_compute_clock
2067 	 */
2068 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
2069 		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
2070 			crtc_state->port_clock,
2071 			pipe_name(crtc->pipe));
2072 		return false;
2073 	}
2074 
2075 	clk_div->p1 = best_clock.p1;
2076 	clk_div->p2 = best_clock.p2;
2077 	WARN_ON(best_clock.m1 != 2);
2078 	clk_div->n = best_clock.n;
2079 	clk_div->m2_int = best_clock.m2 >> 22;
2080 	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
2081 	clk_div->m2_frac_en = clk_div->m2_frac != 0;
2082 
2083 	clk_div->vco = best_clock.vco;
2084 
2085 	return true;
2086 }
2087 
2088 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2089 				    struct bxt_clk_div *clk_div)
2090 {
2091 	int clock = crtc_state->port_clock;
2092 	int i;
2093 
2094 	*clk_div = bxt_dp_clk_val[0];
2095 	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2096 		if (bxt_dp_clk_val[i].clock == clock) {
2097 			*clk_div = bxt_dp_clk_val[i];
2098 			break;
2099 		}
2100 	}
2101 
2102 	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
2103 }
2104 
/*
 * Translate a divider set into the BXT port PLL register values stored in
 * crtc_state->dpll_hw_state. Loop-filter coefficients and the lane stagger
 * value are picked from lookup ranges keyed on VCO and port clock.
 * Returns false if the VCO is outside every supported range.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Loop filter tuning per VCO range */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		/* exactly 5.4 GHz (DP HBR2) gets its own tuning */
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	/* Lane stagger value scales with the port clock */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	/* Assemble the register images; read back by bxt_ddi_pll_get_freq() */
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
2174 
/*
 * Fill crtc_state->dpll_hw_state for a DP output: table lookup of the
 * dividers followed by register-image assembly.
 */
static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	/* always succeeds: unknown rates fall back to the first table entry */
	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2184 
2185 static bool
2186 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2187 {
2188 	struct bxt_clk_div clk_div = {};
2189 
2190 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2191 
2192 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2193 }
2194 
/*
 * Reconstruct the port clock from the saved PLL register images.
 * Inverse of bxt_ddi_set_dpll_hw_state(); reuses the CHV DPLL math.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	struct dpll clock;

	/* M1 is fixed at 2 on BXT (see bxt_ddi_hdmi_pll_dividers) */
	clock.m1 = 2;
	/* rebuild the 22.22 fixed-point M2 from integer + fractional fields */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
2211 
/*
 * Reserve the PLL for this CRTC/encoder pair during an atomic check.
 * BXT has a fixed 1:1 port-to-PLL mapping, so no search is needed;
 * the hw state is computed first and the matching PLL is referenced.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	/* track the new user in the atomic state */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2244 
2245 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2246 {
2247 	i915->dpll.ref_clks.ssc = 100000;
2248 	i915->dpll.ref_clks.nssc = 100000;
2249 	/* DSI non-SSC ref 19.2MHz */
2250 }
2251 
2252 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2253 			      const struct intel_dpll_hw_state *hw_state)
2254 {
2255 	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2256 		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2257 		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2258 		    hw_state->ebb0,
2259 		    hw_state->ebb4,
2260 		    hw_state->pll0,
2261 		    hw_state->pll1,
2262 		    hw_state->pll2,
2263 		    hw_state->pll3,
2264 		    hw_state->pll6,
2265 		    hw_state->pll8,
2266 		    hw_state->pll9,
2267 		    hw_state->pll10,
2268 		    hw_state->pcsdw12);
2269 }
2270 
/* Hardware access vtable shared by all BXT port PLLs */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2277 
/* One PLL per port A/B/C; IDs reuse the SKL enum values (see bxt_get_dpll) */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2284 
/* Platform glue: BXT PLL list and per-platform DPLL operations */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};
2292 
/*
 * Enable a CNL DPLL following the bspec sequence: power up, program
 * CFGCR0/CFGCR1 from the cached hw state, then enable and wait for lock.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));

	/* 3b. Configure DPLL_CFGCR1 (HDMI/WRPLL dividers). */
	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2360 
/*
 * Disable a CNL DPLL: reverse of cnl_ddi_pll_enable() — drop PLL_ENABLE,
 * wait for unlock, then power the PLL down.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
2409 
/*
 * Read back the current hw state of a CNL DPLL into @hw_state.
 * Returns true only if the PLL is enabled; requires the display core
 * power domain so registers are accessible.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2445 
/*
 * Factor the overall WRPLL post divider into the P/Q/K triple the
 * hardware takes. Outputs are left untouched for even values that match
 * none of the factorizations; the divider table in
 * __cnl_ddi_calculate_wrpll() only contains representable values.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3, 5, 7 map directly; 9, 15, 21 use K = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers — the same check order as the factor preference */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2484 
/*
 * Encode the chosen P/Q/K dividers and DCO frequency into the register
 * field encodings held in @params. The DCO ratio is stored as 15.15
 * fixed point relative to the reference clock.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* K divider: register encoding differs from the logical value */
	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* P divider: one-hot register encoding */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Q > 1 is only meaningful with K = 2 (see cnl_wrpll_get_multipliers) */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio = dco_freq / ref_freq in 15.15 fixed point */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2532 
2533 static bool
2534 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2535 			  struct skl_wrpll_params *wrpll_params,
2536 			  int ref_clock)
2537 {
2538 	u32 afe_clock = crtc_state->port_clock * 5;
2539 	u32 dco_min = 7998000;
2540 	u32 dco_max = 10000000;
2541 	u32 dco_mid = (dco_min + dco_max) / 2;
2542 	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
2543 					 18, 20, 24, 28, 30, 32,  36,  40,
2544 					 42, 44, 48, 50, 52, 54,  56,  60,
2545 					 64, 66, 68, 70, 72, 76,  78,  80,
2546 					 84, 88, 90, 92, 96, 98, 100, 102,
2547 					  3,  5,  7,  9, 15, 21 };
2548 	u32 dco, best_dco = 0, dco_centrality = 0;
2549 	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2550 	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2551 
2552 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2553 		dco = afe_clock * dividers[d];
2554 
2555 		if ((dco <= dco_max) && (dco >= dco_min)) {
2556 			dco_centrality = abs(dco - dco_mid);
2557 
2558 			if (dco_centrality < best_dco_centrality) {
2559 				best_dco_centrality = dco_centrality;
2560 				best_div = dividers[d];
2561 				best_dco = dco;
2562 			}
2563 		}
2564 	}
2565 
2566 	if (best_div == 0)
2567 		return false;
2568 
2569 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2570 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2571 				  pdiv, qdiv, kdiv);
2572 
2573 	return true;
2574 }
2575 
/*
 * CNL wrapper: run the WRPLL search against the platform's non-SSC
 * reference clock.
 */
static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
			struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
					 i915->dpll.ref_clks.nssc);
}
2585 
2586 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2587 {
2588 	u32 cfgcr0, cfgcr1;
2589 	struct skl_wrpll_params wrpll_params = { 0, };
2590 
2591 	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2592 
2593 	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2594 		return false;
2595 
2596 	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2597 		wrpll_params.dco_integer;
2598 
2599 	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2600 		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2601 		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2602 		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2603 		DPLL_CFGCR1_CENTRAL_FREQ;
2604 
2605 	memset(&crtc_state->dpll_hw_state, 0,
2606 	       sizeof(crtc_state->dpll_hw_state));
2607 
2608 	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2609 	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2610 	return true;
2611 }
2612 
2613 static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
2614 				    const struct intel_shared_dpll *pll,
2615 				    int ref_clock)
2616 {
2617 	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
2618 	u32 p0, p1, p2, dco_freq;
2619 
2620 	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2621 	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2622 
2623 	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2624 		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2625 			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2626 	else
2627 		p1 = 1;
2628 
2629 
2630 	switch (p0) {
2631 	case DPLL_CFGCR1_PDIV_2:
2632 		p0 = 2;
2633 		break;
2634 	case DPLL_CFGCR1_PDIV_3:
2635 		p0 = 3;
2636 		break;
2637 	case DPLL_CFGCR1_PDIV_5:
2638 		p0 = 5;
2639 		break;
2640 	case DPLL_CFGCR1_PDIV_7:
2641 		p0 = 7;
2642 		break;
2643 	}
2644 
2645 	switch (p2) {
2646 	case DPLL_CFGCR1_KDIV_1:
2647 		p2 = 1;
2648 		break;
2649 	case DPLL_CFGCR1_KDIV_2:
2650 		p2 = 2;
2651 		break;
2652 	case DPLL_CFGCR1_KDIV_3:
2653 		p2 = 3;
2654 		break;
2655 	}
2656 
2657 	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2658 		   ref_clock;
2659 
2660 	dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2661 		      DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
2662 
2663 	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
2664 		return 0;
2665 
2666 	return dco_freq / (p0 * p1 * p2 * 5);
2667 }
2668 
/* CNL wrapper: decode WRPLL frequency using the non-SSC reference clock. */
static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
}
2674 
/*
 * Program crtc_state->dpll_hw_state for a DP output: CNL uses fixed
 * per-link-rate LCPLL settings selected via CFGCR0, always with SSC.
 * NOTE(review): an unlisted port_clock leaves only the SSC bit set
 * (no default case) — presumably unreachable for validated link rates.
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	/* port_clock is the symbol rate x2; divide back to the link rate */
	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2719 
/*
 * Decode the LCPLL link-rate field back into a port clock.
 * Inverse of cnl_ddi_dp_set_dpll_hw_state(); returns link rate x2.
 */
static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll)
{
	int link_clock = 0;

	switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
	case DPLL_CFGCR0_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CFGCR0_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CFGCR0_LINK_RATE_2700:
		link_clock = 270000;
		break;
	case DPLL_CFGCR0_LINK_RATE_3240:
		link_clock = 324000;
		break;
	case DPLL_CFGCR0_LINK_RATE_4050:
		link_clock = 405000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
2757 
/*
 * Reserve a CNL DPLL for this CRTC during an atomic check: compute the
 * hw state for the output type, then search DPLL0-2 for an unused PLL
 * or one already programmed with a matching state.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		drm_dbg_kms(&i915->drm,
			    "Skip DPLL setup for output_types 0x%x\n",
			    crtc_state->output_types);
		return false;
	}

	/* any of DPLL0-2 can drive any port on CNL */
	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		drm_dbg_kms(&i915->drm, "No PLL selected\n");
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2806 
2807 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2808 				const struct intel_shared_dpll *pll)
2809 {
2810 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2811 		return cnl_ddi_wrpll_get_freq(i915, pll);
2812 	else
2813 		return cnl_ddi_lcpll_get_freq(i915, pll);
2814 }
2815 
/* CNL PLL reference: the cdclk reference clock; there is no SSC reference. */
static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC reference */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
2821 
/* Debug dump of a CNL PLL hw state (CFGCR0/CFGCR1 only). */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
		    hw_state->cfgcr0,
		    hw_state->cfgcr1);
}
2830 
/* Hardware access vtable shared by all CNL DPLLs */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
	.get_freq = cnl_ddi_pll_get_freq,
};
2837 
/* CNL shared DPLL pool; any of the three can drive any DDI */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};
2844 
/* Platform glue: CNL PLL list and per-platform DPLL operations */
static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = cnl_update_dpll_ref_clks,
	.dump_hw_state = cnl_dump_hw_state,
};
2852 
/* Maps a DP port clock to its pre-computed combo PHY WRPLL settings */
struct icl_combo_pll_params {
	int clock;	/* port clock this entry applies to */
	struct skl_wrpll_params wrpll;	/* register-encoded divider values */
};
2857 
2858 /*
 * These values are already adjusted: they're the bits we write to the
2860  * registers, not the logical values.
2861  */
/* DP combo PLL settings for a 24 MHz reference; see icl_calc_dp_combo_pll() */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2888 
2889 
2890 /* Also used for 38.4 MHz values. */
/* Also used for 38.4 MHz values. */
/* Must stay in lockstep (same length, same clocks) with the 24 MHz table. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2917 
/* ICL Thunderbolt PLL settings for a 24 MHz reference */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2922 
/* ICL Thunderbolt PLL settings for a 19.2/38.4 MHz reference */
static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2927 
/* TGL Thunderbolt PLL settings for a 19.2/38.4 MHz reference */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2933 
/* TGL Thunderbolt PLL settings for a 24 MHz reference */
static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2939 
/*
 * Pick the pre-computed combo PLL settings for a DP link rate,
 * selecting the table by reference clock (24 MHz vs 19.2/38.4 MHz).
 * NOTE(review): the loop bound uses the 24 MHz table's size for both
 * tables — correct only while the two tables stay the same length.
 */
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				  struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const struct icl_combo_pll_params *params =
		dev_priv->dpll.ref_clks.nssc == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return true;
		}
	}

	MISSING_CASE(clock);
	return false;
}
2961 
/*
 * Select the fixed Thunderbolt PLL settings for the platform (TGL+ vs
 * ICL tables) and reference clock. Unknown references warn and fall
 * through to the 19.2 MHz values. Always succeeds.
 */
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			     struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (INTEL_GEN(dev_priv) >= 12) {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			/* fall-through */
		case 19200:
		case 38400:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
		switch (dev_priv->dpll.ref_clks.nssc) {
		default:
			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
			/* fall-through */
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return true;
}
2997 
/*
 * Frequency readout is undefined for the TBT PLL — warn and return 0.
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}
3009 
3010 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3011 {
3012 	int ref_clock = i915->dpll.ref_clks.nssc;
3013 
3014 	/*
3015 	 * For ICL+, the spec states: if reference frequency is 38.4,
3016 	 * use 19.2 because the DPLL automatically divides that by 2.
3017 	 */
3018 	if (ref_clock == 38400)
3019 		ref_clock = 19200;
3020 
3021 	return ref_clock;
3022 }
3023 
/*
 * ICL wrapper: run the CNL WRPLL search against the ICL-adjusted
 * reference clock (38.4 MHz treated as 19.2 MHz).
 */
static bool
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
					 icl_wrpll_ref_clock(i915));
}
3033 
/* Decode an ICL combo PLL frequency using the ICL-adjusted reference. */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll,
					icl_wrpll_ref_clock(i915));
}
3040 
/*
 * Compute the DPLL CFGCR0/CFGCR1 values for the given CRTC/encoder
 * combination and store them in @pll_state. Returns false if no valid
 * PLL parameters could be calculated for the requested clock.
 */
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
				struct intel_encoder *encoder,
				struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 cfgcr0, cfgcr1;
	struct skl_wrpll_params pll_params = { 0 };
	bool ret;

	/*
	 * Pick the parameter algorithm by output type:
	 * - Type-C PHYs use the (fixed) TBT PLL tables,
	 * - HDMI/DSI use the WRPLL search,
	 * - otherwise (DP on combo PHY) the fixed DP parameter tables.
	 */
	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
							encoder->port)))
		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret)
		return false;

	/* Pack the computed dividers into the CFGCR0/CFGCR1 field layout. */
	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
		 pll_params.dco_integer;

	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
		 DPLL_CFGCR1_PDIV(pll_params.pdiv);

	/* The CFGCR1 clock-select field differs between gen12+ and ICL. */
	if (INTEL_GEN(dev_priv) >= 12)
		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	memset(pll_state, 0, sizeof(*pll_state));

	pll_state->cfgcr0 = cfgcr0;
	pll_state->cfgcr1 = cfgcr1;

	return true;
}
3082 
3083 
3084 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
3085 {
3086 	return id - DPLL_ID_ICL_MGPLL1;
3087 }
3088 
3089 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
3090 {
3091 	return tc_port + DPLL_ID_ICL_MGPLL1;
3092 }
3093 
3094 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3095 				     u32 *target_dco_khz,
3096 				     struct intel_dpll_hw_state *state,
3097 				     bool is_dkl)
3098 {
3099 	u32 dco_min_freq, dco_max_freq;
3100 	int div1_vals[] = {7, 5, 3, 2};
3101 	unsigned int i;
3102 	int div2;
3103 
3104 	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3105 	dco_max_freq = is_dp ? 8100000 : 10000000;
3106 
3107 	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3108 		int div1 = div1_vals[i];
3109 
3110 		for (div2 = 10; div2 > 0; div2--) {
3111 			int dco = div1 * div2 * clock_khz * 5;
3112 			int a_divratio, tlinedrv, inputsel;
3113 			u32 hsdiv;
3114 
3115 			if (dco < dco_min_freq || dco > dco_max_freq)
3116 				continue;
3117 
3118 			if (div2 >= 2) {
3119 				/*
3120 				 * Note: a_divratio not matching TGL BSpec
3121 				 * algorithm but matching hardcoded values and
3122 				 * working on HW for DP alt-mode at least
3123 				 */
3124 				a_divratio = is_dp ? 10 : 5;
3125 				tlinedrv = is_dkl ? 1 : 2;
3126 			} else {
3127 				a_divratio = 5;
3128 				tlinedrv = 0;
3129 			}
3130 			inputsel = is_dp ? 0 : 1;
3131 
3132 			switch (div1) {
3133 			default:
3134 				MISSING_CASE(div1);
3135 				/* fall through */
3136 			case 2:
3137 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3138 				break;
3139 			case 3:
3140 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3141 				break;
3142 			case 5:
3143 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3144 				break;
3145 			case 7:
3146 				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3147 				break;
3148 			}
3149 
3150 			*target_dco_khz = dco;
3151 
3152 			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3153 
3154 			state->mg_clktop2_coreclkctl1 =
3155 				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3156 
3157 			state->mg_clktop2_hsclkctl =
3158 				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3159 				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3160 				hsdiv |
3161 				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3162 
3163 			return true;
3164 		}
3165 	}
3166 
3167 	return false;
3168 }
3169 
3170 /*
3171  * The specification for this function uses real numbers, so the math had to be
3172  * adapted to integer-only calculation, that's why it looks so different.
3173  */
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				  struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = INTEL_GEN(dev_priv) >= 12;

	memset(pll_state, 0, sizeof(*pll_state));

	/* Picks hsdiv/dsdiv and fills the CLKTOP2 fields of pll_state. */
	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				      pll_state, is_dkl)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to find divisors for clock %d\n", clock);
		return false;
	}

	/*
	 * Feedback divider: dco = refclk * m1 * (m2_int + m2_frac / 2^22).
	 * m2_int must fit in 8 bits; pre-gen12 may fall back to m1 = 4.
	 */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255) {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to find mdiv for clock %d\n",
				    clock);
			return false;
		}
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional part of m2, in 2^22 fixed point. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	/* Reference-clock-dependent bias/trim constants from the spec. */
	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return false;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	/* Loop-filter coefficients depend on which half of the DCO range. */
	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		/* Gen12+ Dekel PHY register layout. */
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		/* ICL MG PHY register layout. */
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/*
		 * With a 38.4 MHz reference most bias/coldst bits are left at
		 * their HW defaults: restrict the RMW masks accordingly so
		 * icl_mg_pll_write() doesn't touch them.
		 */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return true;
}
3378 
/*
 * Decode the programmed MG/DKL PLL dividers back into a port clock (kHz),
 * inverting the math done in icl_calc_mg_pll_state().
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll)
{
	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	ref_clock = dev_priv->dpll.ref_clks.nssc;

	/* Feedback divider fields live in different registers per platform. */
	if (INTEL_GEN(dev_priv) >= 12) {
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* hsdiv ratio as programmed in CLKTOP2. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3450 
3451 /**
3452  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3453  * @crtc_state: state for the CRTC to select the DPLL for
3454  * @port_dpll_id: the active @port_dpll_id to select
3455  *
3456  * Select the given @port_dpll_id instance from the DPLLs reserved for the
3457  * CRTC.
3458  */
3459 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3460 			      enum icl_port_dpll_id port_dpll_id)
3461 {
3462 	struct icl_port_dpll *port_dpll =
3463 		&crtc_state->icl_port_dplls[port_dpll_id];
3464 
3465 	crtc_state->shared_dpll = port_dpll->pll;
3466 	crtc_state->dpll_hw_state = port_dpll->hw_state;
3467 }
3468 
3469 static void icl_update_active_dpll(struct intel_atomic_state *state,
3470 				   struct intel_crtc *crtc,
3471 				   struct intel_encoder *encoder)
3472 {
3473 	struct intel_crtc_state *crtc_state =
3474 		intel_atomic_get_new_crtc_state(state, crtc);
3475 	struct intel_digital_port *primary_port;
3476 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3477 
3478 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3479 		enc_to_mst(encoder)->primary :
3480 		enc_to_dig_port(encoder);
3481 
3482 	if (primary_port &&
3483 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
3484 	     primary_port->tc_mode == TC_PORT_LEGACY))
3485 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3486 
3487 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
3488 }
3489 
/*
 * Compute and reserve a combo PHY DPLL for the given CRTC/encoder; on
 * success the reserved PLL is also made the CRTC's active DPLL.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;

	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	/* On EHL, ports other than A may additionally use DPLL4. */
	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	else
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
3534 
/*
 * Reserve both PLLs a Type-C port may need: the TBT PLL (default) and the
 * port-specific MG PHY PLL. If the MG PLL can't be reserved, the already
 * taken TBT PLL reference is dropped again.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Now the MG PHY PLL tied to this port's TC PHY. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3592 
3593 static bool icl_get_dplls(struct intel_atomic_state *state,
3594 			  struct intel_crtc *crtc,
3595 			  struct intel_encoder *encoder)
3596 {
3597 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3598 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3599 
3600 	if (intel_phy_is_combo(dev_priv, phy))
3601 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3602 	else if (intel_phy_is_tc(dev_priv, phy))
3603 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3604 
3605 	MISSING_CASE(phy);
3606 
3607 	return false;
3608 }
3609 
3610 static void icl_put_dplls(struct intel_atomic_state *state,
3611 			  struct intel_crtc *crtc)
3612 {
3613 	const struct intel_crtc_state *old_crtc_state =
3614 		intel_atomic_get_old_crtc_state(state, crtc);
3615 	struct intel_crtc_state *new_crtc_state =
3616 		intel_atomic_get_new_crtc_state(state, crtc);
3617 	enum icl_port_dpll_id id;
3618 
3619 	new_crtc_state->shared_dpll = NULL;
3620 
3621 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3622 		const struct icl_port_dpll *old_port_dpll =
3623 			&old_crtc_state->icl_port_dplls[id];
3624 		struct icl_port_dpll *new_port_dpll =
3625 			&new_crtc_state->icl_port_dplls[id];
3626 
3627 		new_port_dpll->pll = NULL;
3628 
3629 		if (!old_port_dpll->pll)
3630 			continue;
3631 
3632 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3633 	}
3634 }
3635 
/*
 * Read back the MG PLL register state for @pll into @hw_state, masking out
 * reserved bits so the result can be compared against the state computed
 * by icl_calc_mg_pll_state(). Returns true if the PLL is enabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* If display power is off the PLL is off; report "disabled". */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * Same refclk-dependent masking as in icl_calc_mg_pll_state(): with
	 * a 38.4 MHz reference most bias bits keep their HW defaults.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3699 
/*
 * Gen12+ (Dekel PHY) counterpart of mg_pll_get_hw_state(): read back the
 * DKL PLL registers into @hw_state, masked to the fields the driver
 * programs. Returns true if the PLL is enabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* If display power is off the PLL is off; report "disabled". */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3773 
/*
 * Common readout for combo/TBT PLLs: check @enable_reg and, if the PLL is
 * enabled, read back CFGCR0/CFGCR1 from the platform-specific register
 * instances. Returns true if the PLL is enabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* If display power is off the PLL is off; report "disabled". */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (INTEL_GEN(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* EHL DPLL4 uses the register instance of ICL DPLL 4. */
		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3817 
3818 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3819 				   struct intel_shared_dpll *pll,
3820 				   struct intel_dpll_hw_state *hw_state)
3821 {
3822 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3823 
3824 	if (IS_ELKHARTLAKE(dev_priv) &&
3825 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3826 		enable_reg = MG_PLL_ENABLE(0);
3827 	}
3828 
3829 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3830 }
3831 
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	/* The TBT PLL has a single, fixed enable register. */
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
3838 
3839 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3840 			   struct intel_shared_dpll *pll)
3841 {
3842 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3843 	const enum intel_dpll_id id = pll->info->id;
3844 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3845 
3846 	if (INTEL_GEN(dev_priv) >= 12) {
3847 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3848 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3849 	} else {
3850 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3851 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3852 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3853 		} else {
3854 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3855 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3856 		}
3857 	}
3858 
3859 	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3860 	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3861 	intel_de_posting_read(dev_priv, cfgcr1_reg);
3862 }
3863 
/* Program the precomputed MG PLL state into the hardware registers. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers have no reserved fields: plain writes suffice. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Bias/coldst use the refclk-dependent masks from the calc phase. */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the programming before the PLL is enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3914 
/* Program the precomputed DKL (gen12+ Dekel PHY) PLL state into HW. */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the programming before the PLL is enabled. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3984 
3985 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3986 				 struct intel_shared_dpll *pll,
3987 				 i915_reg_t enable_reg)
3988 {
3989 	u32 val;
3990 
3991 	val = intel_de_read(dev_priv, enable_reg);
3992 	val |= PLL_POWER_ENABLE;
3993 	intel_de_write(dev_priv, enable_reg, val);
3994 
3995 	/*
3996 	 * The spec says we need to "wait" but it also says it should be
3997 	 * immediate.
3998 	 */
3999 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4000 		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4001 			pll->info->id);
4002 }
4003 
4004 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4005 			   struct intel_shared_dpll *pll,
4006 			   i915_reg_t enable_reg)
4007 {
4008 	u32 val;
4009 
4010 	val = intel_de_read(dev_priv, enable_reg);
4011 	val |= PLL_ENABLE;
4012 	intel_de_write(dev_priv, enable_reg, val);
4013 
4014 	/* Timeout is actually 600us. */
4015 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4016 		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
4017 }
4018 
4019 static void combo_pll_enable(struct drm_i915_private *dev_priv,
4020 			     struct intel_shared_dpll *pll)
4021 {
4022 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
4023 
4024 	if (IS_ELKHARTLAKE(dev_priv) &&
4025 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4026 		enable_reg = MG_PLL_ENABLE(0);
4027 
4028 		/*
4029 		 * We need to disable DC states when this DPLL is enabled.
4030 		 * This can be done by taking a reference on DPLL4 power
4031 		 * domain.
4032 		 */
4033 		pll->wakeref = intel_display_power_get(dev_priv,
4034 						       POWER_DOMAIN_DPLL_DC_OFF);
4035 	}
4036 
4037 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4038 
4039 	icl_dpll_write(dev_priv, pll);
4040 
4041 	/*
4042 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4043 	 * paths should already be setting the appropriate voltage, hence we do
4044 	 * nothing here.
4045 	 */
4046 
4047 	icl_pll_enable(dev_priv, pll, enable_reg);
4048 
4049 	/* DVFS post sequence would be here. See the comment above. */
4050 }
4051 
4052 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
4053 			   struct intel_shared_dpll *pll)
4054 {
4055 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
4056 
4057 	icl_dpll_write(dev_priv, pll);
4058 
4059 	/*
4060 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4061 	 * paths should already be setting the appropriate voltage, hence we do
4062 	 * nothing here.
4063 	 */
4064 
4065 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
4066 
4067 	/* DVFS post sequence would be here. See the comment above. */
4068 }
4069 
4070 static void mg_pll_enable(struct drm_i915_private *dev_priv,
4071 			  struct intel_shared_dpll *pll)
4072 {
4073 	i915_reg_t enable_reg =
4074 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4075 
4076 	icl_pll_power_enable(dev_priv, pll, enable_reg);
4077 
4078 	if (INTEL_GEN(dev_priv) >= 12)
4079 		dkl_pll_write(dev_priv, pll);
4080 	else
4081 		icl_mg_pll_write(dev_priv, pll);
4082 
4083 	/*
4084 	 * DVFS pre sequence would be here, but in our driver the cdclk code
4085 	 * paths should already be setting the appropriate voltage, hence we do
4086 	 * nothing here.
4087 	 */
4088 
4089 	icl_pll_enable(dev_priv, pll, enable_reg);
4090 
4091 	/* DVFS post sequence would be here. See the comment above. */
4092 }
4093 
/*
 * Disable an ICL+ PLL: clear the enable bit and wait for lock to drop,
 * then clear the power enable bit and wait for the power state to drop.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
4130 
4131 static void combo_pll_disable(struct drm_i915_private *dev_priv,
4132 			      struct intel_shared_dpll *pll)
4133 {
4134 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
4135 
4136 	if (IS_ELKHARTLAKE(dev_priv) &&
4137 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4138 		enable_reg = MG_PLL_ENABLE(0);
4139 		icl_pll_disable(dev_priv, pll, enable_reg);
4140 
4141 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
4142 					pll->wakeref);
4143 		return;
4144 	}
4145 
4146 	icl_pll_disable(dev_priv, pll, enable_reg);
4147 }
4148 
/* Disable the Thunderbolt PLL; it has a single, fixed enable register. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
4154 
4155 static void mg_pll_disable(struct drm_i915_private *dev_priv,
4156 			   struct intel_shared_dpll *pll)
4157 {
4158 	i915_reg_t enable_reg =
4159 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
4160 
4161 	icl_pll_disable(dev_priv, pll, enable_reg);
4162 }
4163 
/*
 * Record the DPLL reference clock; on ICL+ it is the same reference the
 * cdclk hardware state reports.
 */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4169 
4170 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4171 			      const struct intel_dpll_hw_state *hw_state)
4172 {
4173 	drm_dbg_kms(&dev_priv->drm,
4174 		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4175 		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4176 		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4177 		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4178 		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4179 		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4180 		    hw_state->cfgcr0, hw_state->cfgcr1,
4181 		    hw_state->mg_refclkin_ctl,
4182 		    hw_state->mg_clktop2_coreclkctl1,
4183 		    hw_state->mg_clktop2_hsclkctl,
4184 		    hw_state->mg_pll_div0,
4185 		    hw_state->mg_pll_div1,
4186 		    hw_state->mg_pll_lf,
4187 		    hw_state->mg_pll_frac_lock,
4188 		    hw_state->mg_pll_ssc,
4189 		    hw_state->mg_pll_bias,
4190 		    hw_state->mg_pll_tdc_coldst_bias);
4191 }
4192 
/* Ops for combo PHY DPLLs (used by the icl/ehl/tgl tables below). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};
4199 
/* Ops for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};
4206 
/* Ops for MG PHY (gen11 Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4213 
/*
 * ICL DPLL table. The array index of each entry must equal its DPLL id
 * (intel_shared_dpll_init() warns otherwise).
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
4224 
/* DPLL manager for Ice Lake (gen11). */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4233 
/*
 * EHL DPLL table: combo PLLs only. Note DPLL4 gets special handling in
 * combo_pll_enable()/combo_pll_disable().
 */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
4240 
/*
 * DPLL manager for Elkhart Lake. Unlike icl_pll_mgr there is no
 * .update_active_dpll hook — presumably because EHL has no Type-C port
 * PLLs to switch between; NOTE(review): confirm against the callers.
 */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4248 
/*
 * Ops for Dekel PHY (gen12+ Type-C) PLLs: enable/disable are shared with
 * the MG PHY path (which branches on gen), only hw-state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
4255 
/*
 * TGL DPLL table. Array index must equal DPLL id (checked in
 * intel_shared_dpll_init()); TC PLLs 1-4 reuse the ICL MG PLL ids.
 */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
4268 
/* DPLL manager for Tiger Lake (gen12). */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4277 
4278 /**
4279  * intel_shared_dpll_init - Initialize shared DPLLs
4280  * @dev: drm device
4281  *
4282  * Initialize shared DPLLs for @dev.
4283  */
4284 void intel_shared_dpll_init(struct drm_device *dev)
4285 {
4286 	struct drm_i915_private *dev_priv = to_i915(dev);
4287 	const struct intel_dpll_mgr *dpll_mgr = NULL;
4288 	const struct dpll_info *dpll_info;
4289 	int i;
4290 
4291 	if (INTEL_GEN(dev_priv) >= 12)
4292 		dpll_mgr = &tgl_pll_mgr;
4293 	else if (IS_ELKHARTLAKE(dev_priv))
4294 		dpll_mgr = &ehl_pll_mgr;
4295 	else if (INTEL_GEN(dev_priv) >= 11)
4296 		dpll_mgr = &icl_pll_mgr;
4297 	else if (IS_CANNONLAKE(dev_priv))
4298 		dpll_mgr = &cnl_pll_mgr;
4299 	else if (IS_GEN9_BC(dev_priv))
4300 		dpll_mgr = &skl_pll_mgr;
4301 	else if (IS_GEN9_LP(dev_priv))
4302 		dpll_mgr = &bxt_pll_mgr;
4303 	else if (HAS_DDI(dev_priv))
4304 		dpll_mgr = &hsw_pll_mgr;
4305 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4306 		dpll_mgr = &pch_pll_mgr;
4307 
4308 	if (!dpll_mgr) {
4309 		dev_priv->dpll.num_shared_dpll = 0;
4310 		return;
4311 	}
4312 
4313 	dpll_info = dpll_mgr->dpll_info;
4314 
4315 	for (i = 0; dpll_info[i].name; i++) {
4316 		drm_WARN_ON(dev, i != dpll_info[i].id);
4317 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4318 	}
4319 
4320 	dev_priv->dpll.mgr = dpll_mgr;
4321 	dev_priv->dpll.num_shared_dpll = i;
4322 	mutex_init(&dev_priv->dpll.lock);
4323 
4324 	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4325 }
4326 
4327 /**
4328  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4329  * @state: atomic state
4330  * @crtc: CRTC to reserve DPLLs for
4331  * @encoder: encoder
4332  *
4333  * This function reserves all required DPLLs for the given CRTC and encoder
4334  * combination in the current atomic commit @state and the new @crtc atomic
4335  * state.
4336  *
4337  * The new configuration in the atomic commit @state is made effective by
4338  * calling intel_shared_dpll_swap_state().
4339  *
4340  * The reserved DPLLs should be released by calling
4341  * intel_release_shared_dplls().
4342  *
4343  * Returns:
4344  * True if all required DPLLs were successfully reserved.
4345  */
4346 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4347 				struct intel_crtc *crtc,
4348 				struct intel_encoder *encoder)
4349 {
4350 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4351 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4352 
4353 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4354 		return false;
4355 
4356 	return dpll_mgr->get_dplls(state, crtc, encoder);
4357 }
4358 
4359 /**
4360  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4361  * @state: atomic state
4362  * @crtc: crtc from which the DPLLs are to be released
4363  *
4364  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4365  * from the current atomic commit @state and the old @crtc atomic state.
4366  *
4367  * The new configuration in the atomic commit @state is made effective by
4368  * calling intel_shared_dpll_swap_state().
4369  */
4370 void intel_release_shared_dplls(struct intel_atomic_state *state,
4371 				struct intel_crtc *crtc)
4372 {
4373 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4374 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4375 
4376 	/*
4377 	 * FIXME: this function is called for every platform having a
4378 	 * compute_clock hook, even though the platform doesn't yet support
4379 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4380 	 * called on those.
4381 	 */
4382 	if (!dpll_mgr)
4383 		return;
4384 
4385 	dpll_mgr->put_dplls(state, crtc);
4386 }
4387 
4388 /**
4389  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4390  * @state: atomic state
4391  * @crtc: the CRTC for which to update the active DPLL
4392  * @encoder: encoder determining the type of port DPLL
4393  *
4394  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4395  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4396  * DPLL selected will be based on the current mode of the encoder's port.
4397  */
4398 void intel_update_active_dpll(struct intel_atomic_state *state,
4399 			      struct intel_crtc *crtc,
4400 			      struct intel_encoder *encoder)
4401 {
4402 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4403 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4404 
4405 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4406 		return;
4407 
4408 	dpll_mgr->update_active_dpll(state, crtc, encoder);
4409 }
4410 
4411 /**
4412  * intel_dpll_get_freq - calculate the DPLL's output frequency
4413  * @i915: i915 device
4414  * @pll: DPLL for which to calculate the output frequency
4415  *
4416  * Return the output frequency corresponding to @pll's current state.
4417  */
4418 int intel_dpll_get_freq(struct drm_i915_private *i915,
4419 			const struct intel_shared_dpll *pll)
4420 {
4421 	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4422 		return 0;
4423 
4424 	return pll->info->funcs->get_freq(i915, pll);
4425 }
4426 
4427 static void readout_dpll_hw_state(struct drm_i915_private *i915,
4428 				  struct intel_shared_dpll *pll)
4429 {
4430 	struct intel_crtc *crtc;
4431 
4432 	pll->on = pll->info->funcs->get_hw_state(i915, pll,
4433 						 &pll->state.hw_state);
4434 
4435 	if (IS_ELKHARTLAKE(i915) && pll->on &&
4436 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
4437 		pll->wakeref = intel_display_power_get(i915,
4438 						       POWER_DOMAIN_DPLL_DC_OFF);
4439 	}
4440 
4441 	pll->state.crtc_mask = 0;
4442 	for_each_intel_crtc(&i915->drm, crtc) {
4443 		struct intel_crtc_state *crtc_state =
4444 			to_intel_crtc_state(crtc->base.state);
4445 
4446 		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4447 			pll->state.crtc_mask |= 1 << crtc->pipe;
4448 	}
4449 	pll->active_mask = pll->state.crtc_mask;
4450 
4451 	drm_dbg_kms(&i915->drm,
4452 		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
4453 		    pll->info->name, pll->state.crtc_mask, pll->on);
4454 }
4455 
/*
 * Read out hw state for all shared DPLLs, refreshing the reference clock
 * bookkeeping first so later frequency calculations use the right value.
 */
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;

	/* Not every platform/manager provides an update_ref_clks hook. */
	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
		i915->dpll.mgr->update_ref_clks(i915);

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}
4466 
4467 static void sanitize_dpll_state(struct drm_i915_private *i915,
4468 				struct intel_shared_dpll *pll)
4469 {
4470 	if (!pll->on || pll->active_mask)
4471 		return;
4472 
4473 	drm_dbg_kms(&i915->drm,
4474 		    "%s enabled but not in use, disabling\n",
4475 		    pll->info->name);
4476 
4477 	pll->info->funcs->disable(i915, pll);
4478 	pll->on = false;
4479 }
4480 
/* Sanitize the state of every shared DPLL after hw state readout. */
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
}
4488 
4489 /**
4490  * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
4491  * @dev_priv: i915 drm device
4492  * @hw_state: hw state to be written to the log
4493  *
4494  * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4495  */
4496 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4497 			      const struct intel_dpll_hw_state *hw_state)
4498 {
4499 	if (dev_priv->dpll.mgr) {
4500 		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4501 	} else {
4502 		/* fallback for platforms that don't use the shared dpll
4503 		 * infrastructure
4504 		 */
4505 		drm_dbg_kms(&dev_priv->drm,
4506 			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4507 			    "fp0: 0x%x, fp1: 0x%x\n",
4508 			    hw_state->dpll,
4509 			    hw_state->dpll_md,
4510 			    hw_state->fp0,
4511 			    hw_state->fp1);
4512 	}
4513 }
4514