1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34 
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_damage_helper.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45 
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_display_debugfs.h"
49 #include "display/intel_dp.h"
50 #include "display/intel_dp_mst.h"
51 #include "display/intel_dpll_mgr.h"
52 #include "display/intel_dsi.h"
53 #include "display/intel_dvo.h"
54 #include "display/intel_gmbus.h"
55 #include "display/intel_hdmi.h"
56 #include "display/intel_lvds.h"
57 #include "display/intel_sdvo.h"
58 #include "display/intel_tv.h"
59 #include "display/intel_vdsc.h"
60 
61 #include "gt/intel_rps.h"
62 
63 #include "i915_drv.h"
64 #include "i915_trace.h"
65 #include "intel_acpi.h"
66 #include "intel_atomic.h"
67 #include "intel_atomic_plane.h"
68 #include "intel_bw.h"
69 #include "intel_cdclk.h"
70 #include "intel_color.h"
71 #include "intel_csr.h"
72 #include "intel_cursor.h"
73 #include "intel_display_types.h"
74 #include "intel_dp_link_training.h"
75 #include "intel_fbc.h"
76 #include "intel_fbdev.h"
77 #include "intel_fifo_underrun.h"
78 #include "intel_frontbuffer.h"
79 #include "intel_hdcp.h"
80 #include "intel_hotplug.h"
81 #include "intel_overlay.h"
82 #include "intel_pipe_crc.h"
83 #include "intel_pm.h"
84 #include "intel_psr.h"
85 #include "intel_quirks.h"
86 #include "intel_sideband.h"
87 #include "intel_sprite.h"
88 #include "intel_tc.h"
89 #include "intel_vga.h"
90 #include "i9xx_plane.h"
91 
/*
 * Forward declarations for helpers defined later in this file, so the
 * code below can reference them regardless of definition order.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
118 
/*
 * Min/max bounds for the DPLL dividers and derived clocks of a given
 * platform/output combination (dot/vco in kHz).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		/*
		 * Target dot clocks below dot_limit use p2_slow, others
		 * p2_fast (see i9xx_select_p2_div()).  On VLV/CHV/BXT
		 * p2_slow/p2_fast instead act as the min/max of the p2
		 * search range (see intel_limits_vlv).
		 */
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
129 
130 /* returns HPLL frequency in kHz */
131 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
132 {
133 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
134 
135 	/* Obtain SKU information */
136 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
137 		CCK_FUSE_HPLL_FREQ_MASK;
138 
139 	return vco_freq[hpll_freq] * 1000;
140 }
141 
142 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
143 		      const char *name, u32 reg, int ref_freq)
144 {
145 	u32 val;
146 	int divider;
147 
148 	val = vlv_cck_read(dev_priv, reg);
149 	divider = val & CCK_FREQUENCY_VALUES;
150 
151 	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
152 		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
153 		 "%s change in progress\n", name);
154 
155 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
156 }
157 
158 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
159 			   const char *name, u32 reg)
160 {
161 	int hpll;
162 
163 	vlv_cck_get(dev_priv);
164 
165 	if (dev_priv->hpll_freq == 0)
166 		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
167 
168 	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
169 
170 	vlv_cck_put(dev_priv);
171 
172 	return hpll;
173 }
174 
175 static void intel_update_czclk(struct drm_i915_private *dev_priv)
176 {
177 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
178 		return;
179 
180 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
181 						      CCK_CZ_CLOCK_CONTROL);
182 
183 	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
184 		dev_priv->czclk_freq);
185 }
186 
187 /* units of 100MHz */
188 static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
189 			       const struct intel_crtc_state *pipe_config)
190 {
191 	if (HAS_DDI(dev_priv))
192 		return pipe_config->port_clock; /* SPLL */
193 	else
194 		return dev_priv->fdi_pll_freq;
195 }
196 
/*
 * i8xx PLL limits; the three output types differ only in the p1 range
 * and the p2 dividers.
 */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
235 
/* i9xx PLL limits */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
261 
262 
/* G4x PLL limits; LVDS limits differ for single vs. dual channel. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
318 
/* Pineview PLL limits (see pnv_calc_dpll_params() for the m/n quirks) */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
346 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* single-channel LVDS */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* dual-channel LVDS */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
417 
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits (see chv_calc_dpll_params()) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
461 
462 /* WA Display #0827: Gen9:all */
463 static void
464 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
465 {
466 	if (enable)
467 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
468 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
469 	else
470 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
471 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
472 }
473 
474 /* Wa_2006604312:icl,ehl */
475 static void
476 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
477 		       bool enable)
478 {
479 	if (enable)
480 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
481 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
482 	else
483 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
484 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
485 }
486 
/*
 * A CRTC is a port sync slave when another (master) transcoder has been
 * assigned to drive it.
 */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
492 
/*
 * A CRTC is a port sync master when it has one or more slave transcoders
 * attached (non-zero sync_mode_slaves_mask).
 */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
498 
/* Is this CRTC part of a transcoder port sync group, as master or slave? */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
505 
506 /*
507  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
508  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
509  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
510  * The helpers' return value is the rate of the clock that is fed to the
511  * display engine's pipe which can be the above fast dot clock rate or a
512  * divided-down version of it.
513  */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	/* Pineview has a single combined m divider stored in m2. */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* refuse to divide by zero; callers see a 0 kHz dot clock */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
526 
/*
 * Effective m divider; m1/m2 register values are (actual - 2), hence
 * the +2 corrections (see the Ironlake comment above ilk_limits_dac).
 */
static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
531 
532 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
533 {
534 	clock->m = i9xx_dpll_compute_m(clock);
535 	clock->p = clock->p1 * clock->p2;
536 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
537 		return 0;
538 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
539 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
540 
541 	return clock->dot;
542 }
543 
544 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
545 {
546 	clock->m = clock->m1 * clock->m2;
547 	clock->p = clock->p1 * clock->p2;
548 	if (WARN_ON(clock->n == 0 || clock->p == 0))
549 		return 0;
550 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
551 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
552 
553 	return clock->dot / 5;
554 }
555 
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	/* refuse to divide by zero; callers see a 0 kHz pipe clock */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* m2 carries 22 fractional bits, hence the n << 22 and 64bit math */
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* limits are in fast clock (5x) units; the pipe runs at dot/5 */
	return clock->dot / 5;
}
568 
569 /*
570  * Returns whether the given set of divisors are valid for a given refclk with
571  * the given connectors.
572  */
573 static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
574 			       const struct intel_limit *limit,
575 			       const struct dpll *clock)
576 {
577 	if (clock->n < limit->n.min || limit->n.max < clock->n)
578 		return false;
579 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
580 		return false;
581 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
582 		return false;
583 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
584 		return false;
585 
586 	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
587 	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
588 		if (clock->m1 <= clock->m2)
589 			return false;
590 
591 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
592 	    !IS_GEN9_LP(dev_priv)) {
593 		if (clock->p < limit->p.min || limit->p.max < clock->p)
594 			return false;
595 		if (clock->m < limit->m.min || limit->m.max < clock->m)
596 			return false;
597 	}
598 
599 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
600 		return false;
601 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
602 	 * connector, etc., rather than just a single range.
603 	 */
604 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
605 		return false;
606 
607 	return true;
608 }
609 
610 static int
611 i9xx_select_p2_div(const struct intel_limit *limit,
612 		   const struct intel_crtc_state *crtc_state,
613 		   int target)
614 {
615 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
616 
617 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
618 		/*
619 		 * For LVDS just rely on its current settings for dual-channel.
620 		 * We haven't figured out how to reliably set up different
621 		 * single/dual channel state, if we even can.
622 		 */
623 		if (intel_is_dual_link_lvds(dev_priv))
624 			return limit->p2.p2_fast;
625 		else
626 			return limit->p2.p2_slow;
627 	} else {
628 		if (target < limit->p2.dot_limit)
629 			return limit->p2.p2_slow;
630 		else
631 			return limit->p2.p2_fast;
632 	}
633 }
634 
635 /*
636  * Returns a set of divisors for the desired target clock with the given
637  * refclk, or FALSE.  The returned values represent the clock equation:
638  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
639  *
640  * Target and reference clocks are specified in kHz.
641  *
642  * If match_clock is provided, then best_clock P divider must match the P
643  * divider from @match_clock used for LVDS downclocking.
644  */
645 static bool
646 i9xx_find_best_dpll(const struct intel_limit *limit,
647 		    struct intel_crtc_state *crtc_state,
648 		    int target, int refclk, struct dpll *match_clock,
649 		    struct dpll *best_clock)
650 {
651 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
652 	struct dpll clock;
653 	int err = target;
654 
655 	memset(best_clock, 0, sizeof(*best_clock));
656 
657 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
658 
659 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
660 	     clock.m1++) {
661 		for (clock.m2 = limit->m2.min;
662 		     clock.m2 <= limit->m2.max; clock.m2++) {
663 			if (clock.m2 >= clock.m1)
664 				break;
665 			for (clock.n = limit->n.min;
666 			     clock.n <= limit->n.max; clock.n++) {
667 				for (clock.p1 = limit->p1.min;
668 					clock.p1 <= limit->p1.max; clock.p1++) {
669 					int this_err;
670 
671 					i9xx_calc_dpll_params(refclk, &clock);
672 					if (!intel_pll_is_valid(to_i915(dev),
673 								limit,
674 								&clock))
675 						continue;
676 					if (match_clock &&
677 					    clock.p != match_clock->p)
678 						continue;
679 
680 					this_err = abs(clock.dot - target);
681 					if (this_err < err) {
682 						*best_clock = clock;
683 						err = this_err;
684 					}
685 				}
686 			}
687 		}
688 	}
689 
690 	return (err != target);
691 }
692 
693 /*
694  * Returns a set of divisors for the desired target clock with the given
695  * refclk, or FALSE.  The returned values represent the clock equation:
696  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
697  *
698  * Target and reference clocks are specified in kHz.
699  *
700  * If match_clock is provided, then best_clock P divider must match the P
701  * divider from @match_clock used for LVDS downclocking.
702  */
703 static bool
704 pnv_find_best_dpll(const struct intel_limit *limit,
705 		   struct intel_crtc_state *crtc_state,
706 		   int target, int refclk, struct dpll *match_clock,
707 		   struct dpll *best_clock)
708 {
709 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
710 	struct dpll clock;
711 	int err = target;
712 
713 	memset(best_clock, 0, sizeof(*best_clock));
714 
715 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
716 
717 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
718 	     clock.m1++) {
719 		for (clock.m2 = limit->m2.min;
720 		     clock.m2 <= limit->m2.max; clock.m2++) {
721 			for (clock.n = limit->n.min;
722 			     clock.n <= limit->n.max; clock.n++) {
723 				for (clock.p1 = limit->p1.min;
724 					clock.p1 <= limit->p1.max; clock.p1++) {
725 					int this_err;
726 
727 					pnv_calc_dpll_params(refclk, &clock);
728 					if (!intel_pll_is_valid(to_i915(dev),
729 								limit,
730 								&clock))
731 						continue;
732 					if (match_clock &&
733 					    clock.p != match_clock->p)
734 						continue;
735 
736 					this_err = abs(clock.dot - target);
737 					if (this_err < err) {
738 						*best_clock = clock;
739 						err = this_err;
740 					}
741 				}
742 			}
743 		}
744 	}
745 
746 	return (err != target);
747 }
748 
749 /*
750  * Returns a set of divisors for the desired target clock with the given
751  * refclk, or FALSE.  The returned values represent the clock equation:
752  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
753  *
754  * Target and reference clocks are specified in kHz.
755  *
756  * If match_clock is provided, then best_clock P divider must match the P
757  * divider from @match_clock used for LVDS downclocking.
758  */
759 static bool
760 g4x_find_best_dpll(const struct intel_limit *limit,
761 		   struct intel_crtc_state *crtc_state,
762 		   int target, int refclk, struct dpll *match_clock,
763 		   struct dpll *best_clock)
764 {
765 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
766 	struct dpll clock;
767 	int max_n;
768 	bool found = false;
769 	/* approximately equals target * 0.00585 */
770 	int err_most = (target >> 8) + (target >> 9);
771 
772 	memset(best_clock, 0, sizeof(*best_clock));
773 
774 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
775 
776 	max_n = limit->n.max;
777 	/* based on hardware requirement, prefer smaller n to precision */
778 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
779 		/* based on hardware requirement, prefere larger m1,m2 */
780 		for (clock.m1 = limit->m1.max;
781 		     clock.m1 >= limit->m1.min; clock.m1--) {
782 			for (clock.m2 = limit->m2.max;
783 			     clock.m2 >= limit->m2.min; clock.m2--) {
784 				for (clock.p1 = limit->p1.max;
785 				     clock.p1 >= limit->p1.min; clock.p1--) {
786 					int this_err;
787 
788 					i9xx_calc_dpll_params(refclk, &clock);
789 					if (!intel_pll_is_valid(to_i915(dev),
790 								limit,
791 								&clock))
792 						continue;
793 
794 					this_err = abs(clock.dot - target);
795 					if (this_err < err_most) {
796 						*best_clock = clock;
797 						err_most = this_err;
798 						max_n = clock.n;
799 						found = true;
800 					}
801 				}
802 			}
803 		}
804 	}
805 	return found;
806 }
807 
808 /*
809  * Check if the calculated PLL configuration is more optimal compared to the
810  * best configuration and error found so far. Return the calculated error.
811  */
812 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
813 			       const struct dpll *calculated_clock,
814 			       const struct dpll *best_clock,
815 			       unsigned int best_error_ppm,
816 			       unsigned int *error_ppm)
817 {
818 	/*
819 	 * For CHV ignore the error and consider only the P value.
820 	 * Prefer a bigger P value based on HW requirements.
821 	 */
822 	if (IS_CHERRYVIEW(to_i915(dev))) {
823 		*error_ppm = 0;
824 
825 		return calculated_clock->p > best_clock->p;
826 	}
827 
828 	if (drm_WARN_ON_ONCE(dev, !target_freq))
829 		return false;
830 
831 	*error_ppm = div_u64(1000000ULL *
832 				abs(target_freq - calculated_clock->dot),
833 			     target_freq);
834 	/*
835 	 * Prefer a better P value over a better (smaller) error if the error
836 	 * is small. Ensure this preference for future configurations too by
837 	 * setting the error to 0.
838 	 */
839 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
840 		*error_ppm = 0;
841 
842 		return true;
843 	}
844 
845 	return *error_ppm + 10 < best_error_ppm;
846 }
847 
848 /*
849  * Returns a set of divisors for the desired target clock with the given
850  * refclk, or FALSE.  The returned values represent the clock equation:
851  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
852  */
853 static bool
854 vlv_find_best_dpll(const struct intel_limit *limit,
855 		   struct intel_crtc_state *crtc_state,
856 		   int target, int refclk, struct dpll *match_clock,
857 		   struct dpll *best_clock)
858 {
859 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
860 	struct drm_device *dev = crtc->base.dev;
861 	struct dpll clock;
862 	unsigned int bestppm = 1000000;
863 	/* min update 19.2 MHz */
864 	int max_n = min(limit->n.max, refclk / 19200);
865 	bool found = false;
866 
867 	target *= 5; /* fast clock */
868 
869 	memset(best_clock, 0, sizeof(*best_clock));
870 
871 	/* based on hardware requirement, prefer smaller n to precision */
872 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
873 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
874 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
875 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
876 				clock.p = clock.p1 * clock.p2;
877 				/* based on hardware requirement, prefer bigger m1,m2 values */
878 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
879 					unsigned int ppm;
880 
881 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
882 								     refclk * clock.m1);
883 
884 					vlv_calc_dpll_params(refclk, &clock);
885 
886 					if (!intel_pll_is_valid(to_i915(dev),
887 								limit,
888 								&clock))
889 						continue;
890 
891 					if (!vlv_PLL_is_optimal(dev, target,
892 								&clock,
893 								best_clock,
894 								bestppm, &ppm))
895 						continue;
896 
897 					*best_clock = clock;
898 					bestppm = ppm;
899 					found = true;
900 				}
901 			}
902 		}
903 	}
904 
905 	return found;
906 }
907 
908 /*
909  * Returns a set of divisors for the desired target clock with the given
910  * refclk, or FALSE.  The returned values represent the clock equation:
911  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
912  */
913 static bool
914 chv_find_best_dpll(const struct intel_limit *limit,
915 		   struct intel_crtc_state *crtc_state,
916 		   int target, int refclk, struct dpll *match_clock,
917 		   struct dpll *best_clock)
918 {
919 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
920 	struct drm_device *dev = crtc->base.dev;
921 	unsigned int best_error_ppm;
922 	struct dpll clock;
923 	u64 m2;
924 	int found = false;
925 
926 	memset(best_clock, 0, sizeof(*best_clock));
927 	best_error_ppm = 1000000;
928 
929 	/*
930 	 * Based on hardware doc, the n always set to 1, and m1 always
931 	 * set to 2.  If requires to support 200Mhz refclk, we need to
932 	 * revisit this because n may not 1 anymore.
933 	 */
934 	clock.n = 1;
935 	clock.m1 = 2;
936 	target *= 5;	/* fast clock */
937 
938 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
939 		for (clock.p2 = limit->p2.p2_fast;
940 				clock.p2 >= limit->p2.p2_slow;
941 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
942 			unsigned int error_ppm;
943 
944 			clock.p = clock.p1 * clock.p2;
945 
946 			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
947 						   refclk * clock.m1);
948 
949 			if (m2 > INT_MAX/clock.m1)
950 				continue;
951 
952 			clock.m2 = m2;
953 
954 			chv_calc_dpll_params(refclk, &clock);
955 
956 			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
957 				continue;
958 
959 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
960 						best_error_ppm, &error_ppm))
961 				continue;
962 
963 			*best_clock = clock;
964 			best_error_ppm = error_ppm;
965 			found = true;
966 		}
967 	}
968 
969 	return found;
970 }
971 
972 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
973 			struct dpll *best_clock)
974 {
975 	int refclk = 100000;
976 	const struct intel_limit *limit = &intel_limits_bxt;
977 
978 	return chv_find_best_dpll(limit, crtc_state,
979 				  crtc_state->port_clock, refclk,
980 				  NULL, best_clock);
981 }
982 
983 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
984 				    enum pipe pipe)
985 {
986 	i915_reg_t reg = PIPEDSL(pipe);
987 	u32 line1, line2;
988 	u32 line_mask;
989 
990 	if (IS_GEN(dev_priv, 2))
991 		line_mask = DSL_LINEMASK_GEN2;
992 	else
993 		line_mask = DSL_LINEMASK_GEN3;
994 
995 	line1 = intel_de_read(dev_priv, reg) & line_mask;
996 	msleep(5);
997 	line2 = intel_de_read(dev_priv, reg) & line_mask;
998 
999 	return line1 != line2;
1000 }
1001 
/*
 * Poll until the pipe's scanline counter is moving (@state == true) or
 * stopped (@state == false), with a 100 ms timeout.  wait_for()
 * re-evaluates pipe_scanline_is_moving() on each iteration, so each poll
 * itself takes ~5 ms (two PIPEDSL reads with a sleep in between).
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1013 
/* Wait (up to 100 ms) for the pipe's scanline counter to stop moving. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1018 
/* Wait (up to 100 ms) for the pipe's scanline counter to start moving. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1023 
1024 static void
1025 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
1026 {
1027 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
1028 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1029 
1030 	if (INTEL_GEN(dev_priv) >= 4) {
1031 		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1032 		i915_reg_t reg = PIPECONF(cpu_transcoder);
1033 
1034 		/* Wait for the Pipe State to go off */
1035 		if (intel_de_wait_for_clear(dev_priv, reg,
1036 					    I965_PIPECONF_ACTIVE, 100))
1037 			drm_WARN(&dev_priv->drm, 1,
1038 				 "pipe_off wait timed out\n");
1039 	} else {
1040 		intel_wait_for_pipe_scanline_stopped(crtc);
1041 	}
1042 }
1043 
1044 /* Only for pre-ILK configs */
1045 void assert_pll(struct drm_i915_private *dev_priv,
1046 		enum pipe pipe, bool state)
1047 {
1048 	u32 val;
1049 	bool cur_state;
1050 
1051 	val = intel_de_read(dev_priv, DPLL(pipe));
1052 	cur_state = !!(val & DPLL_VCO_ENABLE);
1053 	I915_STATE_WARN(cur_state != state,
1054 	     "PLL state assertion failure (expected %s, current %s)\n",
1055 			onoff(state), onoff(cur_state));
1056 }
1057 
1058 /* XXX: the dsi pll is shared between MIPI DSI ports */
1059 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1060 {
1061 	u32 val;
1062 	bool cur_state;
1063 
1064 	vlv_cck_get(dev_priv);
1065 	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1066 	vlv_cck_put(dev_priv);
1067 
1068 	cur_state = val & DSI_PLL_VCO_EN;
1069 	I915_STATE_WARN(cur_state != state,
1070 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1071 			onoff(state), onoff(cur_state));
1072 }
1073 
1074 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1075 			  enum pipe pipe, bool state)
1076 {
1077 	bool cur_state;
1078 
1079 	if (HAS_DDI(dev_priv)) {
1080 		/*
1081 		 * DDI does not have a specific FDI_TX register.
1082 		 *
1083 		 * FDI is never fed from EDP transcoder
1084 		 * so pipe->transcoder cast is fine here.
1085 		 */
1086 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
1087 		u32 val = intel_de_read(dev_priv,
1088 					TRANS_DDI_FUNC_CTL(cpu_transcoder));
1089 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1090 	} else {
1091 		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
1092 		cur_state = !!(val & FDI_TX_ENABLE);
1093 	}
1094 	I915_STATE_WARN(cur_state != state,
1095 	     "FDI TX state assertion failure (expected %s, current %s)\n",
1096 			onoff(state), onoff(cur_state));
1097 }
1098 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1099 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1100 
1101 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1102 			  enum pipe pipe, bool state)
1103 {
1104 	u32 val;
1105 	bool cur_state;
1106 
1107 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1108 	cur_state = !!(val & FDI_RX_ENABLE);
1109 	I915_STATE_WARN(cur_state != state,
1110 	     "FDI RX state assertion failure (expected %s, current %s)\n",
1111 			onoff(state), onoff(cur_state));
1112 }
1113 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1114 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1115 
1116 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1117 				      enum pipe pipe)
1118 {
1119 	u32 val;
1120 
1121 	/* ILK FDI PLL is always enabled */
1122 	if (IS_GEN(dev_priv, 5))
1123 		return;
1124 
1125 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1126 	if (HAS_DDI(dev_priv))
1127 		return;
1128 
1129 	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
1130 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1131 }
1132 
1133 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1134 		       enum pipe pipe, bool state)
1135 {
1136 	u32 val;
1137 	bool cur_state;
1138 
1139 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1140 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1141 	I915_STATE_WARN(cur_state != state,
1142 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1143 			onoff(state), onoff(cur_state));
1144 }
1145 
/*
 * Warn if the panel power sequencer registers for the panel driven by
 * @pipe are locked (i.e. not safe to write).  First determine which pipe
 * the panel port is actually attached to — the layout differs between
 * PCH-split, VLV/CHV and pre-ILK platforms — then check the PP lock bits.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms never reach this assertion */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* the panel port is selected via PP_ON_DELAYS on PCH */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* pre-ILK: only LVDS can be the panel port */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* regs are writable when the panel is off or explicitly unlocked */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1203 
1204 void assert_pipe(struct drm_i915_private *dev_priv,
1205 		 enum transcoder cpu_transcoder, bool state)
1206 {
1207 	bool cur_state;
1208 	enum intel_display_power_domain power_domain;
1209 	intel_wakeref_t wakeref;
1210 
1211 	/* we keep both pipes enabled on 830 */
1212 	if (IS_I830(dev_priv))
1213 		state = true;
1214 
1215 	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1216 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1217 	if (wakeref) {
1218 		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
1219 		cur_state = !!(val & PIPECONF_ENABLE);
1220 
1221 		intel_display_power_put(dev_priv, power_domain, wakeref);
1222 	} else {
1223 		cur_state = false;
1224 	}
1225 
1226 	I915_STATE_WARN(cur_state != state,
1227 			"transcoder %s assertion failure (expected %s, current %s)\n",
1228 			transcoder_name(cpu_transcoder),
1229 			onoff(state), onoff(cur_state));
1230 }
1231 
1232 static void assert_plane(struct intel_plane *plane, bool state)
1233 {
1234 	enum pipe pipe;
1235 	bool cur_state;
1236 
1237 	cur_state = plane->get_hw_state(plane, &pipe);
1238 
1239 	I915_STATE_WARN(cur_state != state,
1240 			"%s assertion failure (expected %s, current %s)\n",
1241 			plane->base.name, onoff(state), onoff(cur_state));
1242 }
1243 
1244 #define assert_plane_enabled(p) assert_plane(p, true)
1245 #define assert_plane_disabled(p) assert_plane(p, false)
1246 
1247 static void assert_planes_disabled(struct intel_crtc *crtc)
1248 {
1249 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1250 	struct intel_plane *plane;
1251 
1252 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1253 		assert_plane_disabled(plane);
1254 }
1255 
/*
 * Warn if vblank interrupts are still enabled on @crtc.
 * drm_crtc_vblank_get() returns 0 (success) only while vblanks are
 * enabled; in that case drop the reference we just took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1261 
1262 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1263 				    enum pipe pipe)
1264 {
1265 	u32 val;
1266 	bool enabled;
1267 
1268 	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
1269 	enabled = !!(val & TRANS_ENABLE);
1270 	I915_STATE_WARN(enabled,
1271 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1272 	     pipe_name(pipe));
1273 }
1274 
1275 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1276 				   enum pipe pipe, enum port port,
1277 				   i915_reg_t dp_reg)
1278 {
1279 	enum pipe port_pipe;
1280 	bool state;
1281 
1282 	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1283 
1284 	I915_STATE_WARN(state && port_pipe == pipe,
1285 			"PCH DP %c enabled on transcoder %c, should be disabled\n",
1286 			port_name(port), pipe_name(pipe));
1287 
1288 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1289 			"IBX PCH DP %c still using transcoder B\n",
1290 			port_name(port));
1291 }
1292 
1293 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1294 				     enum pipe pipe, enum port port,
1295 				     i915_reg_t hdmi_reg)
1296 {
1297 	enum pipe port_pipe;
1298 	bool state;
1299 
1300 	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1301 
1302 	I915_STATE_WARN(state && port_pipe == pipe,
1303 			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1304 			port_name(port), pipe_name(pipe));
1305 
1306 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1307 			"IBX PCH HDMI %c still using transcoder B\n",
1308 			port_name(port));
1309 }
1310 
/*
 * Warn if any PCH output port (DP B/C/D, VGA, LVDS, HDMI/SDVO B/C/D) is
 * still driving @pipe.  Used before the PCH transcoder is shut down.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1335 
/*
 * Program the VLV DPLL with the precomputed hw state and wait for it to
 * lock.  Only called when DPLL_VCO_ENABLE is set in the precomputed value
 * (see vlv_enable_pll()).
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	/* fixed delay before polling the lock bit (presumably for clock
	 * stabilization per the PLL programming sequence — confirm in bspec) */
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1349 
/*
 * Enable the DPLL for @crtc on VLV.  The VCO is only powered up when the
 * precomputed state has DPLL_VCO_ENABLE set; DPLL_MD is programmed
 * unconditionally afterwards.  The pipe must be disabled and the panel
 * power sequencer unlocked before calling.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1368 
1369 
/*
 * Power up the CHV DPLL: first re-enable the 10 bit (dclkp) clock to the
 * display controller via the DPIO sideband, then write the DPLL register
 * and wait for the lock bit.  Only called when DPLL_VCO_ENABLE is set in
 * the precomputed state (see chv_enable_pll()).
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1399 
/*
 * Enable the DPLL for @crtc on CHV.  The VCO is only powered up when the
 * precomputed state has DPLL_VCO_ENABLE set.  Programming DPLL_MD on
 * pipes B/C requires the WaPixelRepeatModeFixForC0 chicken-bit dance
 * because those pipes' own DPLL_MD registers are not functional.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* remember what we wrote; DPLL_MD(pipe) can't be read back */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1440 
1441 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1442 {
1443 	if (IS_I830(dev_priv))
1444 		return false;
1445 
1446 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1447 }
1448 
/*
 * Enable the DPLL for @crtc on gen2-4: write the value once with VGA mode
 * still enabled, then the final value, wait for the clocks to stabilize,
 * program the pixel multiplier (DPLL_MD on gen4+, a DPLL rewrite
 * otherwise), and finally rewrite the value three more times.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1494 
1495 static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
1496 {
1497 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1498 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1499 	enum pipe pipe = crtc->pipe;
1500 
1501 	/* Don't disable pipe or pipe PLLs if needed */
1502 	if (IS_I830(dev_priv))
1503 		return;
1504 
1505 	/* Make sure the pipe isn't still relying on us */
1506 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
1507 
1508 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
1509 	intel_de_posting_read(dev_priv, DPLL(pipe));
1510 }
1511 
1512 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1513 {
1514 	u32 val;
1515 
1516 	/* Make sure the pipe isn't still relying on us */
1517 	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
1518 
1519 	val = DPLL_INTEGRATED_REF_CLK_VLV |
1520 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1521 	if (pipe != PIPE_A)
1522 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1523 
1524 	intel_de_write(dev_priv, DPLL(pipe), val);
1525 	intel_de_posting_read(dev_priv, DPLL(pipe));
1526 }
1527 
1528 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1529 {
1530 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1531 	u32 val;
1532 
1533 	/* Make sure the pipe isn't still relying on us */
1534 	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
1535 
1536 	val = DPLL_SSC_REF_CLK_CHV |
1537 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1538 	if (pipe != PIPE_A)
1539 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1540 
1541 	intel_de_write(dev_priv, DPLL(pipe), val);
1542 	intel_de_posting_read(dev_priv, DPLL(pipe));
1543 
1544 	vlv_dpio_get(dev_priv);
1545 
1546 	/* Disable 10bit clock to display controller */
1547 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1548 	val &= ~DPIO_DCLKP_EN;
1549 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1550 
1551 	vlv_dpio_put(dev_priv);
1552 }
1553 
1554 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1555 			 struct intel_digital_port *dig_port,
1556 			 unsigned int expected_mask)
1557 {
1558 	u32 port_mask;
1559 	i915_reg_t dpll_reg;
1560 
1561 	switch (dig_port->base.port) {
1562 	case PORT_B:
1563 		port_mask = DPLL_PORTB_READY_MASK;
1564 		dpll_reg = DPLL(0);
1565 		break;
1566 	case PORT_C:
1567 		port_mask = DPLL_PORTC_READY_MASK;
1568 		dpll_reg = DPLL(0);
1569 		expected_mask <<= 4;
1570 		break;
1571 	case PORT_D:
1572 		port_mask = DPLL_PORTD_READY_MASK;
1573 		dpll_reg = DPIO_PHY_STATUS;
1574 		break;
1575 	default:
1576 		BUG();
1577 	}
1578 
1579 	if (intel_de_wait_for_register(dev_priv, dpll_reg,
1580 				       port_mask, expected_mask, 1000))
1581 		drm_WARN(&dev_priv->drm, 1,
1582 			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
1583 			 dig_port->base.base.base.id, dig_port->base.base.name,
1584 			 intel_de_read(dev_priv, dpll_reg) & port_mask,
1585 			 expected_mask);
1586 }
1587 
/*
 * Enable the PCH transcoder for @crtc_state's pipe on ILK-style PCHs.
 * Requires the shared DPLL and both FDI sides already enabled.  Copies
 * BPC and interlace settings from the CPU pipe's PIPECONF, applying the
 * CPT timing-override and IBX frame-start-delay workarounds, then waits
 * for the transcoder to report enabled.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1654 
/*
 * Enable the single LPT PCH transcoder (hardwired behind pipe A's FDI).
 * Applies the timing-override and frame-start-delay workarounds, copies
 * the interlace mode from the CPU transcoder's PIPECONF, then waits for
 * the transcoder to report enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1686 
/*
 * Disable the PCH transcoder for @pipe on ILK-style PCHs.  FDI and all
 * PCH ports must already be off.  Waits for the transcoder to report
 * disabled, then clears the CPT timing-override workaround bit.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1717 
1718 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1719 {
1720 	u32 val;
1721 
1722 	val = intel_de_read(dev_priv, LPT_TRANSCONF);
1723 	val &= ~TRANS_ENABLE;
1724 	intel_de_write(dev_priv, LPT_TRANSCONF, val);
1725 	/* wait for PCH transcoder off, transcoder state */
1726 	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
1727 				    TRANS_STATE_ENABLE, 50))
1728 		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
1729 
1730 	/* Workaround: clear timing override bit. */
1731 	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
1732 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1733 	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
1734 }
1735 
1736 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1737 {
1738 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1739 
1740 	if (HAS_PCH_LPT(dev_priv))
1741 		return PIPE_A;
1742 	else
1743 		return crtc->pipe;
1744 }
1745 
1746 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1747 {
1748 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1749 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1750 	u32 mode_flags = crtc->mode_flags;
1751 
1752 	/*
1753 	 * From Gen 11, In case of dsi cmd mode, frame counter wouldnt
1754 	 * have updated at the beginning of TE, if we want to use
1755 	 * the hw counter, then we would find it updated in only
1756 	 * the next TE, hence switching to sw counter.
1757 	 */
1758 	if (mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | I915_MODE_FLAG_DSI_USE_TE1))
1759 		return 0;
1760 
1761 	/*
1762 	 * On i965gm the hardware frame counter reads
1763 	 * zero when the TV encoder is enabled :(
1764 	 */
1765 	if (IS_I965GM(dev_priv) &&
1766 	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1767 		return 0;
1768 
1769 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1770 		return 0xffffffff; /* full 32 bit counter */
1771 	else if (INTEL_GEN(dev_priv) >= 3)
1772 		return 0xffffff; /* only 24 bits of frame count */
1773 	else
1774 		return 0; /* Gen2 doesn't have a hardware frame counter */
1775 }
1776 
1777 void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1778 {
1779 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1780 
1781 	assert_vblank_disabled(&crtc->base);
1782 	drm_crtc_set_max_vblank_count(&crtc->base,
1783 				      intel_crtc_max_vblank_count(crtc_state));
1784 	drm_crtc_vblank_on(&crtc->base);
1785 }
1786 
1787 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
1788 {
1789 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1790 
1791 	drm_crtc_vblank_off(&crtc->base);
1792 	assert_vblank_disabled(&crtc->base);
1793 }
1794 
/*
 * Enable the pipe by setting PIPECONF_ENABLE.  All planes must already be
 * off, and the clock source feeding the pipe (DPLL/DSI PLL on GMCH, FDI
 * PLLs for PCH encoders) must be running.  When the hardware frame
 * counter is unusable, also waits for the scanline to start moving so
 * vblank timestamps are sane.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1852 
/*
 * Disable the pipe by clearing PIPECONF_ENABLE (except on i830, where
 * both pipes must stay enabled).  All planes must already be off.  Also
 * drops double-wide mode, and waits for the pipe to actually stop when
 * the enable bit was cleared.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1892 
/* GTT tile (page) size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1897 
1898 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1899 {
1900 	if (!is_ccs_modifier(fb->modifier))
1901 		return false;
1902 
1903 	return plane >= fb->format->num_planes / 2;
1904 }
1905 
1906 static bool is_gen12_ccs_modifier(u64 modifier)
1907 {
1908 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1909 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1910 
1911 }
1912 
1913 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1914 {
1915 	return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1916 }
1917 
1918 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1919 {
1920 	if (is_ccs_modifier(fb->modifier))
1921 		return is_ccs_plane(fb, plane);
1922 
1923 	return plane == 1;
1924 }
1925 
1926 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1927 {
1928 	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1929 		    (main_plane && main_plane >= fb->format->num_planes / 2));
1930 
1931 	return fb->format->num_planes / 2 + main_plane;
1932 }
1933 
1934 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1935 {
1936 	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1937 		    ccs_plane < fb->format->num_planes / 2);
1938 
1939 	return ccs_plane - fb->format->num_planes / 2;
1940 }
1941 
1942 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1943 {
1944 	struct drm_i915_private *i915 = to_i915(fb->dev);
1945 
1946 	if (is_ccs_modifier(fb->modifier))
1947 		return main_to_ccs_plane(fb, main_plane);
1948 	else if (INTEL_GEN(i915) < 11 &&
1949 		 intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
1950 		return 1;
1951 	else
1952 		return 0;
1953 }
1954 
1955 bool
1956 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1957 				    uint64_t modifier)
1958 {
1959 	return info->is_yuv &&
1960 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1961 }
1962 
1963 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
1964 				   int color_plane)
1965 {
1966 	return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
1967 	       color_plane == 1;
1968 }
1969 
/*
 * Return the tile width in bytes for the given fb plane.
 *
 * CCS planes have fixed tile widths of their own, hence the
 * is_ccs_plane() early returns ahead of the fallthroughs into the
 * corresponding non-CCS tiling cases.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		/* For linear a "tile" is one page worth of bytes. */
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* gen12 CCS planes use 64 byte wide tiles (rows). */
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2022 
2023 static unsigned int
2024 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2025 {
2026 	if (is_gen12_ccs_plane(fb, color_plane))
2027 		return 1;
2028 
2029 	return intel_tile_size(to_i915(fb->dev)) /
2030 		intel_tile_width_bytes(fb, color_plane);
2031 }
2032 
2033 /* Return the tile dimensions in pixel units */
2034 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2035 			    unsigned int *tile_width,
2036 			    unsigned int *tile_height)
2037 {
2038 	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2039 	unsigned int cpp = fb->format->cpp[color_plane];
2040 
2041 	*tile_width = tile_width_bytes / cpp;
2042 	*tile_height = intel_tile_height(fb, color_plane);
2043 }
2044 
2045 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2046 					int color_plane)
2047 {
2048 	unsigned int tile_width, tile_height;
2049 
2050 	intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2051 
2052 	return fb->pitches[color_plane] * tile_height;
2053 }
2054 
/* Round @height up to a whole number of tile rows for the fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2063 
2064 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2065 {
2066 	unsigned int size = 0;
2067 	int i;
2068 
2069 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2070 		size += rot_info->plane[i].width * rot_info->plane[i].height;
2071 
2072 	return size;
2073 }
2074 
2075 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2076 {
2077 	unsigned int size = 0;
2078 	int i;
2079 
2080 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2081 		size += rem_info->plane[i].width * rem_info->plane[i].height;
2082 
2083 	return size;
2084 }
2085 
2086 static void
2087 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2088 			const struct drm_framebuffer *fb,
2089 			unsigned int rotation)
2090 {
2091 	view->type = I915_GGTT_VIEW_NORMAL;
2092 	if (drm_rotation_90_or_270(rotation)) {
2093 		view->type = I915_GGTT_VIEW_ROTATED;
2094 		view->rotated = to_intel_framebuffer(fb)->rot_info;
2095 	}
2096 }
2097 
/* Required GGTT alignment for the cursor plane, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2109 
/*
 * Required GGTT alignment for linear framebuffers. Note the check
 * order matters: VLV/CHV are gen7, so they must be matched before
 * the generic gen4+ case.
 */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2122 
/*
 * Return the required GGTT alignment in bytes for the given fb plane.
 * A return value of 0 means no particular alignment is required.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* The UV plane of a semiplanar fb needs tile-row alignment. */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		/* Same UV-plane rule applies on gen12+ for plain Y tiling. */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2160 
2161 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2162 {
2163 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2164 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2165 
2166 	return INTEL_GEN(dev_priv) < 4 ||
2167 		(plane->has_fbc &&
2168 		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2169 }
2170 
/*
 * Pin an fb's backing object into the GGTT using the given view and,
 * when @uses_fence and the vma is fenceable, attach a fence register
 * for tiled scanout.
 *
 * Returns the pinned vma (with an extra reference taken) on success,
 * or an ERR_PTR on failure. PLANE_HAS_FENCE is ORed into *out_flags
 * if a fence was attached.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* Alignment of the main surface (plane 0) governs the pin. */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Fence is mandatory pre-gen4; give up on failure. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference handed to the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2265 
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if one was
 * attached), unpin the vma from the display plane and drop the
 * reference taken at pin time. Unpinning is done under the object lock.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj, NULL);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2276 
2277 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2278 			  unsigned int rotation)
2279 {
2280 	if (drm_rotation_90_or_270(rotation))
2281 		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2282 	else
2283 		return fb->pitches[color_plane];
2284 }
2285 
2286 /*
2287  * Convert the x/y offsets into a linear offset.
2288  * Only valid with 0/180 degree rotation, which is fine since linear
2289  * offset is only used with linear buffers on pre-hsw and tiled buffers
2290  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2291  */
2292 u32 intel_fb_xy_to_linear(int x, int y,
2293 			  const struct intel_plane_state *state,
2294 			  int color_plane)
2295 {
2296 	const struct drm_framebuffer *fb = state->hw.fb;
2297 	unsigned int cpp = fb->format->cpp[color_plane];
2298 	unsigned int pitch = state->color_plane[color_plane].stride;
2299 
2300 	return y * pitch + x * cpp;
2301 }
2302 
2303 /*
2304  * Add the x/y offsets derived from fb->offsets[] to the user
2305  * specified plane src x/y offsets. The resulting x/y offsets
2306  * specify the start of scanout from the beginning of the gtt mapping.
2307  */
2308 void intel_add_fb_offsets(int *x, int *y,
2309 			  const struct intel_plane_state *state,
2310 			  int color_plane)
2311 
2312 {
2313 	*x += state->color_plane[color_plane].x;
2314 	*y += state->color_plane[color_plane].y;
2315 }
2316 
2317 static u32 intel_adjust_tile_offset(int *x, int *y,
2318 				    unsigned int tile_width,
2319 				    unsigned int tile_height,
2320 				    unsigned int tile_size,
2321 				    unsigned int pitch_tiles,
2322 				    u32 old_offset,
2323 				    u32 new_offset)
2324 {
2325 	unsigned int pitch_pixels = pitch_tiles * tile_width;
2326 	unsigned int tiles;
2327 
2328 	WARN_ON(old_offset & (tile_size - 1));
2329 	WARN_ON(new_offset & (tile_size - 1));
2330 	WARN_ON(new_offset > old_offset);
2331 
2332 	tiles = (old_offset - new_offset) / tile_size;
2333 
2334 	*y += tiles / pitch_tiles * tile_height;
2335 	*x += tiles % pitch_tiles * tile_width;
2336 
2337 	/* minimize x in case it got needlessly big */
2338 	*y += *x / pitch_pixels * tile_height;
2339 	*x %= pitch_pixels;
2340 
2341 	return new_offset;
2342 }
2343 
2344 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2345 {
2346 	return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2347 	       is_gen12_ccs_plane(fb, color_plane);
2348 }
2349 
/*
 * Rebase an aligned surface offset from @old_offset to @new_offset by
 * moving the difference into the x/y pixel offsets. For tiled surfaces
 * both offsets must be tile aligned; for linear they may be anything.
 * Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the full byte delta back into x/y directly. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2388 
2389 /*
2390  * Adjust the tile offset by moving the difference into
2391  * the x/y offsets.
2392  */
2393 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2394 					     const struct intel_plane_state *state,
2395 					     int color_plane,
2396 					     u32 old_offset, u32 new_offset)
2397 {
2398 	return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2399 					   state->hw.rotation,
2400 					   state->color_plane[color_plane].stride,
2401 					   old_offset, new_offset);
2402 }
2403 
2404 /*
2405  * Computes the aligned offset to the base tile and adjusts
2406  * x, y. bytes per pixel is assumed to be a power-of-two.
2407  *
2408  * In the 90/270 rotated case, x and y are assumed
2409  * to be already rotated to match the rotated GTT view, and
2410  * pitch is the tile_height aligned framebuffer height.
2411  *
2412  * This function is used when computing the derived information
2413  * under intel_framebuffer, so using any of that information
2414  * here is not allowed. Anything under drm_framebuffer can be
2415  * used. This is why the user has to pass in the pitch since it
2416  * is specified in the rotated orientation.
2417  */
2418 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2419 					int *x, int *y,
2420 					const struct drm_framebuffer *fb,
2421 					int color_plane,
2422 					unsigned int pitch,
2423 					unsigned int rotation,
2424 					u32 alignment)
2425 {
2426 	unsigned int cpp = fb->format->cpp[color_plane];
2427 	u32 offset, offset_aligned;
2428 
2429 	if (!is_surface_linear(fb, color_plane)) {
2430 		unsigned int tile_size, tile_width, tile_height;
2431 		unsigned int tile_rows, tiles, pitch_tiles;
2432 
2433 		tile_size = intel_tile_size(dev_priv);
2434 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2435 
2436 		if (drm_rotation_90_or_270(rotation)) {
2437 			pitch_tiles = pitch / tile_height;
2438 			swap(tile_width, tile_height);
2439 		} else {
2440 			pitch_tiles = pitch / (tile_width * cpp);
2441 		}
2442 
2443 		tile_rows = *y / tile_height;
2444 		*y %= tile_height;
2445 
2446 		tiles = *x / tile_width;
2447 		*x %= tile_width;
2448 
2449 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2450 
2451 		offset_aligned = offset;
2452 		if (alignment)
2453 			offset_aligned = rounddown(offset_aligned, alignment);
2454 
2455 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2456 					 tile_size, pitch_tiles,
2457 					 offset, offset_aligned);
2458 	} else {
2459 		offset = *y * pitch + *x * cpp;
2460 		offset_aligned = offset;
2461 		if (alignment) {
2462 			offset_aligned = rounddown(offset_aligned, alignment);
2463 			*y = (offset % alignment) / pitch;
2464 			*x = ((offset % alignment) - *y * pitch) / cpp;
2465 		} else {
2466 			*y = *x = 0;
2467 		}
2468 	}
2469 
2470 	return offset_aligned;
2471 }
2472 
2473 u32 intel_plane_compute_aligned_offset(int *x, int *y,
2474 				       const struct intel_plane_state *state,
2475 				       int color_plane)
2476 {
2477 	struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2478 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2479 	const struct drm_framebuffer *fb = state->hw.fb;
2480 	unsigned int rotation = state->hw.rotation;
2481 	int pitch = state->color_plane[color_plane].stride;
2482 	u32 alignment;
2483 
2484 	if (intel_plane->id == PLANE_CURSOR)
2485 		alignment = intel_cursor_alignment(dev_priv);
2486 	else
2487 		alignment = intel_surf_alignment(fb, color_plane);
2488 
2489 	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2490 					    pitch, rotation, alignment);
2491 }
2492 
/*
 * Convert the fb->offset[] into x/y offsets.
 *
 * Validates that the plane's byte offset meets the required alignment
 * and that offset + height * pitch cannot overflow, then expresses the
 * offset as x/y coordinates relative to offset 0.
 * Returns 0 on success, -EINVAL/-ERANGE on bad offsets.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/* gen12 semiplanar UV planes must start on a tile row boundary. */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold fb->offsets[] into x/y relative to byte offset 0. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2540 
2541 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2542 {
2543 	switch (fb_modifier) {
2544 	case I915_FORMAT_MOD_X_TILED:
2545 		return I915_TILING_X;
2546 	case I915_FORMAT_MOD_Y_TILED:
2547 	case I915_FORMAT_MOD_Y_TILED_CCS:
2548 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2549 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2550 		return I915_TILING_Y;
2551 	default:
2552 		return I915_TILING_NONE;
2553 	}
2554 }
2555 
2556 /*
2557  * From the Sky Lake PRM:
2558  * "The Color Control Surface (CCS) contains the compression status of
2559  *  the cache-line pairs. The compression state of the cache-line pair
2560  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2561  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2562  *  cache-line-pairs. CCS is always Y tiled."
2563  *
2564  * Since cache line pairs refers to horizontally adjacent cache lines,
2565  * each cache line in the CCS corresponds to an area of 32x16 cache
2566  * lines on the main surface. Since each pixel is 4 bytes, this gives
2567  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2568  * main surface.
2569  */
2570 static const struct drm_format_info skl_ccs_formats[] = {
2571 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2572 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2573 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2574 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2575 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2576 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2577 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2578 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2579 };
2580 
2581 /*
2582  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
2583  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
2584  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
2585  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
2586  * the main surface.
2587  */
2588 static const struct drm_format_info gen12_ccs_formats[] = {
2589 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2590 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2591 	  .hsub = 1, .vsub = 1, },
2592 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2593 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2594 	  .hsub = 1, .vsub = 1, },
2595 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2596 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2597 	  .hsub = 1, .vsub = 1, .has_alpha = true },
2598 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2599 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2600 	  .hsub = 1, .vsub = 1, .has_alpha = true },
2601 	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
2602 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2603 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2604 	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
2605 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2606 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2607 	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
2608 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2609 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2610 	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
2611 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2612 	  .hsub = 2, .vsub = 1, .is_yuv = true },
2613 	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
2614 	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
2615 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2616 	{ .format = DRM_FORMAT_P010, .num_planes = 4,
2617 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2618 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2619 	{ .format = DRM_FORMAT_P012, .num_planes = 4,
2620 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2621 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2622 	{ .format = DRM_FORMAT_P016, .num_planes = 4,
2623 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2624 	  .hsub = 2, .vsub = 2, .is_yuv = true },
2625 };
2626 
2627 static const struct drm_format_info *
2628 lookup_format_info(const struct drm_format_info formats[],
2629 		   int num_formats, u32 format)
2630 {
2631 	int i;
2632 
2633 	for (i = 0; i < num_formats; i++) {
2634 		if (formats[i].format == format)
2635 			return &formats[i];
2636 	}
2637 
2638 	return NULL;
2639 }
2640 
2641 static const struct drm_format_info *
2642 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2643 {
2644 	switch (cmd->modifier[0]) {
2645 	case I915_FORMAT_MOD_Y_TILED_CCS:
2646 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2647 		return lookup_format_info(skl_ccs_formats,
2648 					  ARRAY_SIZE(skl_ccs_formats),
2649 					  cmd->pixel_format);
2650 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2651 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2652 		return lookup_format_info(gen12_ccs_formats,
2653 					  ARRAY_SIZE(gen12_ccs_formats),
2654 					  cmd->pixel_format);
2655 	default:
2656 		return NULL;
2657 	}
2658 }
2659 
2660 bool is_ccs_modifier(u64 modifier)
2661 {
2662 	return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2663 	       modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2664 	       modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2665 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2666 }
2667 
2668 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2669 {
2670 	return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2671 			    512) * 64;
2672 }
2673 
2674 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2675 			      u32 pixel_format, u64 modifier)
2676 {
2677 	struct intel_crtc *crtc;
2678 	struct intel_plane *plane;
2679 
2680 	/*
2681 	 * We assume the primary plane for pipe A has
2682 	 * the highest stride limits of them all,
2683 	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
2684 	 */
2685 	crtc = intel_get_first_crtc(dev_priv);
2686 	if (!crtc)
2687 		return 0;
2688 
2689 	plane = to_intel_plane(crtc->base.primary);
2690 
2691 	return plane->max_stride(plane, pixel_format, modifier,
2692 				 DRM_MODE_ROTATE_0);
2693 }
2694 
2695 static
2696 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2697 			u32 pixel_format, u64 modifier)
2698 {
2699 	/*
2700 	 * Arbitrary limit for gen4+ chosen to match the
2701 	 * render engine max stride.
2702 	 *
2703 	 * The new CCS hash mode makes remapping impossible
2704 	 */
2705 	if (!is_ccs_modifier(modifier)) {
2706 		if (INTEL_GEN(dev_priv) >= 7)
2707 			return 256*1024;
2708 		else if (INTEL_GEN(dev_priv) >= 4)
2709 			return 128*1024;
2710 	}
2711 
2712 	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2713 }
2714 
/*
 * Return the required stride alignment in bytes for the given fb plane.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	/* Tiled surfaces: stride must be a multiple of the tile width. */
	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
2759 
2760 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2761 {
2762 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2763 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2764 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2765 	int i;
2766 
2767 	/* We don't want to deal with remapping with cursors */
2768 	if (plane->id == PLANE_CURSOR)
2769 		return false;
2770 
2771 	/*
2772 	 * The display engine limits already match/exceed the
2773 	 * render engine limits, so not much point in remapping.
2774 	 * Would also need to deal with the fence POT alignment
2775 	 * and gen2 2KiB GTT tile size.
2776 	 */
2777 	if (INTEL_GEN(dev_priv) < 4)
2778 		return false;
2779 
2780 	/*
2781 	 * The new CCS hash mode isn't compatible with remapping as
2782 	 * the virtual address of the pages affects the compressed data.
2783 	 */
2784 	if (is_ccs_modifier(fb->modifier))
2785 		return false;
2786 
2787 	/* Linear needs a page aligned stride for remapping */
2788 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2789 		unsigned int alignment = intel_tile_size(dev_priv) - 1;
2790 
2791 		for (i = 0; i < fb->format->num_planes; i++) {
2792 			if (fb->pitches[i] & alignment)
2793 				return false;
2794 		}
2795 	}
2796 
2797 	return true;
2798 }
2799 
2800 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2801 {
2802 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2803 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2804 	unsigned int rotation = plane_state->hw.rotation;
2805 	u32 stride, max_stride;
2806 
2807 	/*
2808 	 * No remapping for invisible planes since we don't have
2809 	 * an actual source viewport to remap.
2810 	 */
2811 	if (!plane_state->uapi.visible)
2812 		return false;
2813 
2814 	if (!intel_plane_can_remap(plane_state))
2815 		return false;
2816 
2817 	/*
2818 	 * FIXME: aux plane limits on gen9+ are
2819 	 * unclear in Bspec, for now no checking.
2820 	 */
2821 	stride = intel_fb_pitch(fb, 0, rotation);
2822 	max_stride = plane->max_stride(plane, fb->format->format,
2823 				       fb->modifier, rotation);
2824 
2825 	return stride > max_stride;
2826 }
2827 
/*
 * Return the horizontal/vertical subsampling factors (@hsub, @vsub) that
 * apply to @color_plane of @fb. The main surface (plane 0) is never
 * subsampled; gen12 CCS AUX planes derive their factors from the char
 * block sizes; all other aux planes use the generic format factors.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	/* The main surface is never subsampled. */
	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		/* Non-gen12-CCS aux planes: generic format subsampling. */
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	/* Gen12 CCS: horizontal factor is the ratio of char block widths. */
	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* Fixed vertical factor for gen12 CCS AUX planes. */
	*vsub = 32;
}
/*
 * Validate that the intra-tile x/y offsets (@x, @y) of CCS plane
 * @ccs_plane agree with those of its main surface. Returns 0 on success
 * or -EINVAL on mismatch. No-op (returns 0) for non-CCS planes.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct drm_i915_private *i915 = to_i915(fb->dev);
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	/* Nothing to check for non-CCS planes. */
	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* Scale the CCS tile dimensions to main surface units. */
	tile_width *= hsub;
	tile_height *= vsub;

	/* Intra-tile offsets, both expressed in main surface units. */
	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		drm_dbg_kms(&i915->drm,
			      "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
2914 
2915 static void
2916 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2917 {
2918 	int main_plane = is_ccs_plane(fb, color_plane) ?
2919 			 ccs_to_main_plane(fb, color_plane) : 0;
2920 	int main_hsub, main_vsub;
2921 	int hsub, vsub;
2922 
2923 	intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2924 	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2925 	*w = fb->width / main_hsub / hsub;
2926 	*h = fb->height / main_vsub / vsub;
2927 }
2928 
2929 /*
2930  * Setup the rotated view for an FB plane and return the size the GTT mapping
2931  * requires for this view.
2932  */
2933 static u32
2934 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
2935 		  u32 gtt_offset_rotated, int x, int y,
2936 		  unsigned int width, unsigned int height,
2937 		  unsigned int tile_size,
2938 		  unsigned int tile_width, unsigned int tile_height,
2939 		  struct drm_framebuffer *fb)
2940 {
2941 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2942 	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2943 	unsigned int pitch_tiles;
2944 	struct drm_rect r;
2945 
2946 	/* Y or Yf modifiers required for 90/270 rotation */
2947 	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
2948 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
2949 		return 0;
2950 
2951 	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
2952 		return 0;
2953 
2954 	rot_info->plane[plane] = *plane_info;
2955 
2956 	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
2957 
2958 	/* rotate the x/y offsets to match the GTT view */
2959 	drm_rect_init(&r, x, y, width, height);
2960 	drm_rect_rotate(&r,
2961 			plane_info->width * tile_width,
2962 			plane_info->height * tile_height,
2963 			DRM_MODE_ROTATE_270);
2964 	x = r.x1;
2965 	y = r.y1;
2966 
2967 	/* rotate the tile dimensions to match the GTT view */
2968 	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
2969 	swap(tile_width, tile_height);
2970 
2971 	/*
2972 	 * We only keep the x/y offsets, so push all of the
2973 	 * gtt offset into the x/y offsets.
2974 	 */
2975 	intel_adjust_tile_offset(&x, &y,
2976 				 tile_width, tile_height,
2977 				 tile_size, pitch_tiles,
2978 				 gtt_offset_rotated * tile_size, 0);
2979 
2980 	/*
2981 	 * First pixel of the framebuffer from
2982 	 * the start of the rotated gtt mapping.
2983 	 */
2984 	intel_fb->rotated[plane].x = x;
2985 	intel_fb->rotated[plane].y = y;
2986 
2987 	return plane_info->width * plane_info->height;
2988 }
2989 
/*
 * Compute the normal (unrotated) view layout parameters for all color
 * planes of @fb and set up the rotated view for 90/270 plane rotation.
 * Validates the plane offsets against CCS and fence constraints and
 * checks that the fb fits in the backing object.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		intel_fb_plane_dims(&width, &height, fb, i);

		/* Convert the byte offset of plane i into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				    i, fb->offsets[i]);
			return ret;
		}

		/* CCS AUX and main surface intra-tile offsets must agree. */
		ret = intel_fb_check_ccs_xy(fb, i, x, y);
		if (ret)
			return ret;

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			drm_dbg_kms(&dev_priv->drm,
				    "bad fb plane %d offset: 0x%x\n",
				     i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Aligned surface offset, converted to tile units below. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb, i)) {
			struct intel_remapped_plane_info plane_info;
			unsigned int tile_width, tile_height;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			/* Plane extent in tile units. */
			plane_info.offset = offset;
			plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
							 tile_width * cpp);
			plane_info.width = DIV_ROUND_UP(x + width, tile_width);
			plane_info.height = DIV_ROUND_UP(y + height,
							 tile_height);

			/* how many tiles does this plane need */
			size = plane_info.stride * plane_info.height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* Accumulate the rotated-view mapping size, in tiles. */
			gtt_offset_rotated +=
				setup_fb_rotation(i, &plane_info,
						  gtt_offset_rotated,
						  x, y, width, height,
						  tile_size,
						  tile_width, tile_height,
						  fb);
		} else {
			/* Linear: size in tiles up to the last byte used. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		drm_dbg_kms(&dev_priv->drm,
			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
			    mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
3100 
/*
 * Set up a remapped (or, for 90/270 rotation, rotated) GGTT view for
 * @plane_state and recompute the per-color-plane stride and x/y offsets
 * relative to that view. Also rebases (and for 90/270, rotates) the uapi
 * src rectangle to be relative to the remapped viewport.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	/*
	 * NOTE: info points into plane_state->view; taking it before the
	 * memset below is fine since only the pointed-to storage is cleared.
	 */
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0;

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src rectangle is in 16.16 fixed point; convert to pixels. */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS fbs must not be remapped (see intel_plane_can_remap()). */
	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		/* Viewport origin and size in this plane's units. */
		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		/* Remapped plane extent, in tile units. */
		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		gtt_offset += info->plane[i].width * info->plane[i].height;

		/*
		 * First pixel of the src viewport from the start of the
		 * remapped gtt mapping.
		 */
		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3211 
/*
 * intel_plane_compute_gtt - compute the GGTT view and color plane layout
 * @plane_state: plane state to fill in
 *
 * Computes the GGTT view and the per-color-plane stride/x/y offsets for
 * the plane's fb, switching to a remapped/rotated view if the fb stride
 * exceeds the plane's limits.
 *
 * Returns 0 on success, or a negative errno if the stride is unusable
 * even after remapping.
 */
int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
	/*
	 * NOTE(review): relies on to_intel_framebuffer(NULL) yielding NULL
	 * (base being the first member) for the check below — confirm if
	 * the struct layout ever changes.
	 */
	const struct intel_framebuffer *fb =
		to_intel_framebuffer(plane_state->hw.fb);
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes;

	if (!fb)
		return 0;

	num_planes = fb->base.format->num_planes;

	if (intel_plane_needs_remap(plane_state)) {
		intel_plane_remap_gtt(plane_state);

		/*
		 * Sometimes even remapping can't overcome
		 * the stride limitations :( Can happen with
		 * big plane sizes and suitably misaligned
		 * offsets.
		 */
		return intel_plane_check_stride(plane_state);
	}

	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

	/* Normal path: use the offsets precomputed by intel_fill_fb_info(). */
	for (i = 0; i < num_planes; i++) {
		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
		plane_state->color_plane[i].offset = 0;

		if (drm_rotation_90_or_270(rotation)) {
			plane_state->color_plane[i].x = fb->rotated[i].x;
			plane_state->color_plane[i].y = fb->rotated[i].y;
		} else {
			plane_state->color_plane[i].x = fb->normal[i].x;
			plane_state->color_plane[i].y = fb->normal[i].y;
		}
	}

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				fb->base.width << 16, fb->base.height << 16,
				DRM_MODE_ROTATE_270);

	return intel_plane_check_stride(plane_state);
}
3260 
3261 static int i9xx_format_to_fourcc(int format)
3262 {
3263 	switch (format) {
3264 	case DISPPLANE_8BPP:
3265 		return DRM_FORMAT_C8;
3266 	case DISPPLANE_BGRA555:
3267 		return DRM_FORMAT_ARGB1555;
3268 	case DISPPLANE_BGRX555:
3269 		return DRM_FORMAT_XRGB1555;
3270 	case DISPPLANE_BGRX565:
3271 		return DRM_FORMAT_RGB565;
3272 	default:
3273 	case DISPPLANE_BGRX888:
3274 		return DRM_FORMAT_XRGB8888;
3275 	case DISPPLANE_RGBX888:
3276 		return DRM_FORMAT_XBGR8888;
3277 	case DISPPLANE_BGRA888:
3278 		return DRM_FORMAT_ARGB8888;
3279 	case DISPPLANE_RGBA888:
3280 		return DRM_FORMAT_ABGR8888;
3281 	case DISPPLANE_BGRX101010:
3282 		return DRM_FORMAT_XRGB2101010;
3283 	case DISPPLANE_RGBX101010:
3284 		return DRM_FORMAT_XBGR2101010;
3285 	case DISPPLANE_BGRA101010:
3286 		return DRM_FORMAT_ARGB2101010;
3287 	case DISPPLANE_RGBA101010:
3288 		return DRM_FORMAT_ABGR2101010;
3289 	case DISPPLANE_RGBX161616:
3290 		return DRM_FORMAT_XBGR16161616F;
3291 	}
3292 }
3293 
3294 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3295 {
3296 	switch (format) {
3297 	case PLANE_CTL_FORMAT_RGB_565:
3298 		return DRM_FORMAT_RGB565;
3299 	case PLANE_CTL_FORMAT_NV12:
3300 		return DRM_FORMAT_NV12;
3301 	case PLANE_CTL_FORMAT_XYUV:
3302 		return DRM_FORMAT_XYUV8888;
3303 	case PLANE_CTL_FORMAT_P010:
3304 		return DRM_FORMAT_P010;
3305 	case PLANE_CTL_FORMAT_P012:
3306 		return DRM_FORMAT_P012;
3307 	case PLANE_CTL_FORMAT_P016:
3308 		return DRM_FORMAT_P016;
3309 	case PLANE_CTL_FORMAT_Y210:
3310 		return DRM_FORMAT_Y210;
3311 	case PLANE_CTL_FORMAT_Y212:
3312 		return DRM_FORMAT_Y212;
3313 	case PLANE_CTL_FORMAT_Y216:
3314 		return DRM_FORMAT_Y216;
3315 	case PLANE_CTL_FORMAT_Y410:
3316 		return DRM_FORMAT_XVYU2101010;
3317 	case PLANE_CTL_FORMAT_Y412:
3318 		return DRM_FORMAT_XVYU12_16161616;
3319 	case PLANE_CTL_FORMAT_Y416:
3320 		return DRM_FORMAT_XVYU16161616;
3321 	default:
3322 	case PLANE_CTL_FORMAT_XRGB_8888:
3323 		if (rgb_order) {
3324 			if (alpha)
3325 				return DRM_FORMAT_ABGR8888;
3326 			else
3327 				return DRM_FORMAT_XBGR8888;
3328 		} else {
3329 			if (alpha)
3330 				return DRM_FORMAT_ARGB8888;
3331 			else
3332 				return DRM_FORMAT_XRGB8888;
3333 		}
3334 	case PLANE_CTL_FORMAT_XRGB_2101010:
3335 		if (rgb_order) {
3336 			if (alpha)
3337 				return DRM_FORMAT_ABGR2101010;
3338 			else
3339 				return DRM_FORMAT_XBGR2101010;
3340 		} else {
3341 			if (alpha)
3342 				return DRM_FORMAT_ARGB2101010;
3343 			else
3344 				return DRM_FORMAT_XRGB2101010;
3345 		}
3346 	case PLANE_CTL_FORMAT_XRGB_16161616F:
3347 		if (rgb_order) {
3348 			if (alpha)
3349 				return DRM_FORMAT_ABGR16161616F;
3350 			else
3351 				return DRM_FORMAT_XBGR16161616F;
3352 		} else {
3353 			if (alpha)
3354 				return DRM_FORMAT_ARGB16161616F;
3355 			else
3356 				return DRM_FORMAT_XRGB16161616F;
3357 		}
3358 	}
3359 }
3360 
/*
 * Wrap the pre-allocated stolen memory region described by @plane_config
 * in a GEM object and pin it into the mappable GGTT at the exact offset
 * the BIOS programmed. Returns the pinned VMA, or NULL on any failure.
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand the region to the GTT minimum alignment on both ends. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		/* Carry over the BIOS fb stride and tiling mode. */
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the fixed GGTT offset the BIOS already programmed. */
	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Tiled objects are rejected unless the mapping is fenceable. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
3429 
3430 static bool
3431 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3432 			      struct intel_initial_plane_config *plane_config)
3433 {
3434 	struct drm_device *dev = crtc->base.dev;
3435 	struct drm_i915_private *dev_priv = to_i915(dev);
3436 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3437 	struct drm_framebuffer *fb = &plane_config->fb->base;
3438 	struct i915_vma *vma;
3439 
3440 	switch (fb->modifier) {
3441 	case DRM_FORMAT_MOD_LINEAR:
3442 	case I915_FORMAT_MOD_X_TILED:
3443 	case I915_FORMAT_MOD_Y_TILED:
3444 		break;
3445 	default:
3446 		drm_dbg(&dev_priv->drm,
3447 			"Unsupported modifier for initial FB: 0x%llx\n",
3448 			fb->modifier);
3449 		return false;
3450 	}
3451 
3452 	vma = initial_plane_vma(dev_priv, plane_config);
3453 	if (!vma)
3454 		return false;
3455 
3456 	mode_cmd.pixel_format = fb->format->format;
3457 	mode_cmd.width = fb->width;
3458 	mode_cmd.height = fb->height;
3459 	mode_cmd.pitches[0] = fb->pitches[0];
3460 	mode_cmd.modifier[0] = fb->modifier;
3461 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3462 
3463 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
3464 				   vma->obj, &mode_cmd)) {
3465 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
3466 		goto err_vma;
3467 	}
3468 
3469 	plane_config->vma = vma;
3470 	return true;
3471 
3472 err_vma:
3473 	i915_vma_put(vma);
3474 	return false;
3475 }
3476 
3477 static void
3478 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3479 			struct intel_plane_state *plane_state,
3480 			bool visible)
3481 {
3482 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3483 
3484 	plane_state->uapi.visible = visible;
3485 
3486 	if (visible)
3487 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3488 	else
3489 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3490 }
3491 
3492 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
3493 {
3494 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3495 	struct drm_plane *plane;
3496 
3497 	/*
3498 	 * Active_planes aliases if multiple "primary" or cursor planes
3499 	 * have been used on the same (or wrong) pipe. plane_mask uses
3500 	 * unique ids, hence we can use that to reconstruct active_planes.
3501 	 */
3502 	crtc_state->enabled_planes = 0;
3503 	crtc_state->active_planes = 0;
3504 
3505 	drm_for_each_plane_mask(plane, &dev_priv->drm,
3506 				crtc_state->uapi.plane_mask) {
3507 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
3508 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3509 	}
3510 }
3511 
/*
 * Disable @plane on @crtc outside of the atomic commit machinery (used
 * when sanitizing state inherited from the BIOS, see
 * intel_find_initial_plane_obj()), fixing up the derived crtc state
 * bookkeeping (plane masks, data rate, min cdclk) to match.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* NOTE(review): presumably IPS depends on the primary plane — verify. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3556 
/*
 * Reconstruct the framebuffer the BIOS left enabled on @intel_crtc's
 * primary plane: either wrap the pre-allocated memory in a new fb, or
 * share an fb another CRTC already reconstructed at the same GGTT
 * address. If neither works, disable the plane(s) and pretend the BIOS
 * never had them enabled.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(intel_crtc->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc_state(c->state)->uapi.active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Another CRTC scans out from the same GGTT address: share. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);
	if (crtc_state->bigjoiner) {
		/* Also disable the primary of the linked bigjoiner slave. */
		struct intel_crtc *slave =
			crtc_state->bigjoiner_linked_crtc;
		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
	}

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Take an extra pin + reference for this plane's use of the vma. */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-screen src/dst rectangles matching the BIOS fb. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
					  intel_crtc);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3664 
3665 
/*
 * Try to bring the CCS AUX plane x/y offsets into agreement with the
 * main surface offsets (@main_x, @main_y) by walking the AUX surface
 * offset backwards one alignment step at a time. On success updates the
 * AUX color plane offset/x/y in @plane_state and returns true; returns
 * false if no matching position was found.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/* AUX offset must stay >= main offset and AUX y must not pass main y. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* Found a matching position. */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* Can't back up any further. */
		if (aux_offset == 0)
			break;

		/* Step the AUX offset back by one alignment unit. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3710 
3711 unsigned int
3712 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
3713 {
3714 	int x = 0, y = 0;
3715 
3716 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3717 					  plane_state->color_plane[0].offset, 0);
3718 
3719 	return y;
3720 }
3721 
3722 static int intel_plane_min_width(struct intel_plane *plane,
3723 				 const struct drm_framebuffer *fb,
3724 				 int color_plane,
3725 				 unsigned int rotation)
3726 {
3727 	if (plane->min_width)
3728 		return plane->min_width(fb, color_plane, rotation);
3729 	else
3730 		return 1;
3731 }
3732 
3733 static int intel_plane_max_width(struct intel_plane *plane,
3734 				 const struct drm_framebuffer *fb,
3735 				 int color_plane,
3736 				 unsigned int rotation)
3737 {
3738 	if (plane->max_width)
3739 		return plane->max_width(fb, color_plane, rotation);
3740 	else
3741 		return INT_MAX;
3742 }
3743 
3744 static int intel_plane_max_height(struct intel_plane *plane,
3745 				  const struct drm_framebuffer *fb,
3746 				  int color_plane,
3747 				  unsigned int rotation)
3748 {
3749 	if (plane->max_height)
3750 		return plane->max_height(fb, color_plane, rotation);
3751 	else
3752 		return INT_MAX;
3753 }
3754 
/*
 * skl_calc_main_surface_offset - compute the aligned main surface offset
 * @plane_state: plane state to compute the offset for
 * @x: in/out x coordinate, adjusted to be relative to @offset
 * @y: in/out y coordinate, adjusted to be relative to @offset
 * @offset: out aligned surface offset for color plane 0
 *
 * The offset is clamped so the AUX surface offset (which is programmed
 * relative to the main surface and must be non-negative) stays reachable,
 * and for X-tiled fbs reduced until x + width fits within the stride.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
				 int *x, int *y, u32 *offset)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	const int aux_plane = intel_main_to_aux_plane(fb, 0);
	const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
	const u32 alignment = intel_surf_alignment(fb, 0);
	const int w = drm_rect_width(&plane_state->uapi.src) >> 16;

	intel_add_fb_offsets(x, y, plane_state, 0);
	*offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
	/* The adjustments below assume a power-of-2 alignment. */
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (aux_plane && *offset > aux_offset)
		*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
							    *offset,
							    aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Back the offset up one alignment step at a time. */
		while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
			if (*offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
								    *offset,
								    *offset - alignment);
		}
	}

	return 0;
}
3805 
/*
 * Validate the main (Y/RGB) surface size against the plane's limits
 * and compute the final color plane 0 offset and x/y coordinates,
 * keeping them consistent with the CCS AUX surface when present.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	const unsigned int rotation = plane_state->hw.rotation;
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
	const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
	const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
	const int aux_plane = intel_main_to_aux_plane(fb, 0);
	const u32 alignment = intel_surf_alignment(fb, 0);
	u32 offset;
	int ret;

	if (w > max_width || w < min_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
			    w, h, min_width, max_width, max_height);
		return -EINVAL;
	}

	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
	if (ret)
		return ret;

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets. Walk the offset
	 * back one alignment step at a time until they do (or we hit 0).
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3870 
/*
 * Compute offset/x/y for the UV (CbCr) plane of a YUV semiplanar fb.
 * The >> 17 shifts drop the 16.16 fixed point fraction and halve the
 * coordinates for the 2x chroma subsampling.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
	int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/*
	 * Keep the UV offset consistent with its CCS plane: it must not
	 * exceed the CCS plane offset, and the x/y must line up with the
	 * CCS coordinates computed earlier.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
		u32 alignment = intel_surf_alignment(fb, uv_plane);

		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
3935 
/*
 * Compute offset/x/y for every CCS (compression control) plane of the
 * fb by scaling the main surface x/y with the combined subsampling
 * factors of the CCS plane and its corresponding main plane.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* src coordinates are 16.16 fixed point */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		/* only CCS planes are handled here; skip main/UV planes */
		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* total subsampling relative to the full-resolution surface */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/* re-express x/y in main-plane units, keeping the remainder */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
3978 
/*
 * Set up plane_state->color_plane[] for all surfaces of the fb:
 * pin/map via the GTT, then compute offsets for the CCS and UV AUX
 * surfaces (when present) before the main surface, since the main
 * surface setup depends on them. Unused color_plane slots are zeroed.
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret, i;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* nothing else to compute for an invisible plane */
	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since the main surface setup depends on
	 * it.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (intel_format_info_is_yuv_semiplanar(fb->format,
						fb->modifier)) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	/* clear out the slots the fb doesn't use */
	for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = 0;
		plane_state->color_plane[i].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
4020 
/* Disable (unbind) a single pipe scaler by clearing its registers. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	/* raw (_fw) register writes are done under uncore.lock */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4035 
4036 /*
4037  * This function detaches (aka. unbinds) unused scalers in hardware
4038  */
4039 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4040 {
4041 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4042 	const struct intel_crtc_scaler_state *scaler_state =
4043 		&crtc_state->scaler_state;
4044 	int i;
4045 
4046 	/* loop through and disable scalers that aren't in use */
4047 	for (i = 0; i < intel_crtc->num_scalers; i++) {
4048 		if (!scaler_state->scalers[i].in_use)
4049 			skl_detach_scaler(intel_crtc, i);
4050 	}
4051 }
4052 
/*
 * Unit the hardware stride is expressed in: 64 byte chunks for
 * linear buffers, tiles for tiled buffers (tile height when the
 * plane is rotated 90/270, tile width in bytes otherwise).
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
4067 
4068 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4069 		     int color_plane)
4070 {
4071 	const struct drm_framebuffer *fb = plane_state->hw.fb;
4072 	unsigned int rotation = plane_state->hw.rotation;
4073 	u32 stride = plane_state->color_plane[color_plane].stride;
4074 
4075 	if (color_plane >= fb->format->num_planes)
4076 		return 0;
4077 
4078 	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4079 }
4080 
/*
 * Map a DRM fourcc to the PLANE_CTL format/channel-order bits.
 * Unknown formats are logged via MISSING_CASE() and yield 0.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_XYUV8888:
		return PLANE_CTL_FORMAT_XYUV;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4142 
4143 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4144 {
4145 	if (!plane_state->hw.fb->format->has_alpha)
4146 		return PLANE_CTL_ALPHA_DISABLE;
4147 
4148 	switch (plane_state->hw.pixel_blend_mode) {
4149 	case DRM_MODE_BLEND_PIXEL_NONE:
4150 		return PLANE_CTL_ALPHA_DISABLE;
4151 	case DRM_MODE_BLEND_PREMULTI:
4152 		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4153 	case DRM_MODE_BLEND_COVERAGE:
4154 		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4155 	default:
4156 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
4157 		return PLANE_CTL_ALPHA_DISABLE;
4158 	}
4159 }
4160 
4161 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4162 {
4163 	if (!plane_state->hw.fb->format->has_alpha)
4164 		return PLANE_COLOR_ALPHA_DISABLE;
4165 
4166 	switch (plane_state->hw.pixel_blend_mode) {
4167 	case DRM_MODE_BLEND_PIXEL_NONE:
4168 		return PLANE_COLOR_ALPHA_DISABLE;
4169 	case DRM_MODE_BLEND_PREMULTI:
4170 		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4171 	case DRM_MODE_BLEND_COVERAGE:
4172 		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4173 	default:
4174 		MISSING_CASE(plane_state->hw.pixel_blend_mode);
4175 		return PLANE_COLOR_ALPHA_DISABLE;
4176 	}
4177 }
4178 
/*
 * Map a fb modifier to PLANE_CTL tiling/decompression bits.
 * Linear needs no bits; unknown modifiers are logged via
 * MISSING_CASE() and yield 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4206 
/*
 * Map a DRM rotation value to PLANE_CTL rotation bits.
 * Unknown values are logged via MISSING_CASE() and yield 0
 * (no rotation).
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
4228 
/*
 * Map a DRM reflection value to PLANE_CTL flip bits (CNL+).
 * Only horizontal reflection is supported; DRM_MODE_REFLECT_Y
 * deliberately shares the default (unsupported) label.
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
4243 
/*
 * Pipe-level PLANE_CTL bits derived from the crtc state. On GLK and
 * gen10+ only the async flip bit applies here; the pipe gamma/CSC
 * enables are set only on earlier platforms.
 */
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 plane_ctl = 0;

	if (crtc_state->uapi.async_flip)
		plane_ctl |= PLANE_CTL_ASYNC_FLIP;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return plane_ctl;

	if (crtc_state->gamma_enable)
		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	return plane_ctl;
}
4263 
/*
 * Build the per-plane PLANE_CTL value from the plane state: enable
 * bit, alpha/gamma/YUV bits (pre-GLK only), format, tiling, rotation,
 * reflection (gen10+) and color keying.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* on GLK/gen10+ these bits live in PLANE_COLOR_CTL instead */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4302 
4303 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4304 {
4305 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4306 	u32 plane_color_ctl = 0;
4307 
4308 	if (INTEL_GEN(dev_priv) >= 11)
4309 		return plane_color_ctl;
4310 
4311 	if (crtc_state->gamma_enable)
4312 		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4313 
4314 	if (crtc_state->csc_enable)
4315 		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4316 
4317 	return plane_color_ctl;
4318 }
4319 
/*
 * Build the per-plane PLANE_COLOR_CTL value (GLK+): alpha mode plus
 * the YUV->RGB CSC selection for YUV formats on non-HDR planes, or
 * the input CSC enable for HDR planes.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		switch (plane_state->hw.color_encoding) {
		case DRM_COLOR_YCBCR_BT709:
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
			break;
		case DRM_COLOR_YCBCR_BT2020:
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
			break;
		default:
			/* BT.601 for everything else */
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
		}
		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the programmable input CSC instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4353 
/*
 * Re-program display hw state after a reset/resume and, if a
 * duplicated atomic state was saved beforehand, commit it back.
 * Returns 0 or a negative error code; -EDEADLK is unexpected here
 * and triggers a WARN.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* nothing saved to restore */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
4392 
4393 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4394 {
4395 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4396 		intel_has_gpu_reset(&dev_priv->gt));
4397 }
4398 
/*
 * Called before a GPU reset. If the reset will clobber the display
 * (or reset-modeset testing is forced), take all modeset locks,
 * duplicate the current atomic state for intel_display_finish_reset()
 * to restore, and gracefully disable all crtcs.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* retry until all modeset locks are acquired without deadlock */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* stash the state; intel_display_finish_reset() restores it */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4461 
/*
 * Called after a GPU reset. Restores the atomic state stashed by
 * intel_display_prepare_reset(), re-initializing display hw first
 * when the reset actually clobbered it, and drops the modeset locks
 * taken before the reset.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4513 
/*
 * Apply the ICL PIPE_CHICKEN register workarounds
 * (Display WA #1153 and WA #1605353570) for @crtc's pipe.
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
4537 
/*
 * Switch the FDI TX/RX of @crtc's pipe from the training patterns to
 * the normal pattern, with enhanced framing, once link training is
 * complete.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
		               intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
4578 
/*
 * The FDI link training function for ILK/Ibexpeak: run training
 * pattern 1 (bit lock) and then pattern 2 (symbol lock), polling
 * FDI_RX_IIR up to 5 times for each. Failures are logged but not
 * returned to the caller.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock (training pattern 1 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			/* write the bit back to acknowledge/clear it */
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* poll for symbol lock (training pattern 2 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
4674 
/*
 * Voltage swing / pre-emphasis combinations tried in order during
 * SNB FDI link training (applied via FDI_LINK_TRAIN_VOL_EMP_MASK).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4681 
/*
 * The FDI link training function for SNB/Cougarpoint.
 *
 * Runs training pattern 1 then pattern 2 on the CPU FDI TX / PCH FDI RX
 * pair, retrying each pattern at every vswing/pre-emphasis level in
 * snb_b_fdi_train_param[] and polling FDI_RX_IIR for bit/symbol lock.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* CPT PCHes select the training pattern with a different field. */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Step through the vswing levels, polling for bit lock at each. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Ack the interrupt by writing the bit back. */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Same vswing sweep, now polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				/* Ack the interrupt by writing the bit back. */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
4818 
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Unlike gen6, each vswing/pre-emphasis level is tried twice and the
 * link is fully disabled and re-enabled between attempts. Timeouts here
 * are much shorter (microseconds) per the IVB training requirements.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each level is attempted twice */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			/* Re-read once in case lock arrived just after the read. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
4944 
/*
 * Enable the FDI PLLs: PCH FDI RX PLL first, then switch RX from the
 * raw clock to PCDclk, and finally make sure the CPU FDI TX PLL is on.
 * The udelay()s implement the required warmup times between steps.
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* FDI RX BPC (bits 18:16) must match the pipe's PIPECONF BPC */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	temp = intel_de_read(dev_priv, reg);
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
4981 
/*
 * Disable the FDI PLLs in the reverse order of ilk_fdi_pll_enable():
 * RX back to the raw clock, TX PLL off, then RX PLL off.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5011 
/*
 * Disable the FDI link (CPU TX and PCH RX), leaving training pattern 1
 * selected on both ends as the modeset sequence requires.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* Keep FDI RX BPC (bits 18:16) in sync with PIPECONF */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5063 
5064 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5065 {
5066 	struct drm_crtc *crtc;
5067 	bool cleanup_done;
5068 
5069 	drm_for_each_crtc(crtc, &dev_priv->drm) {
5070 		struct drm_crtc_commit *commit;
5071 		spin_lock(&crtc->commit_lock);
5072 		commit = list_first_entry_or_null(&crtc->commit_list,
5073 						  struct drm_crtc_commit, commit_entry);
5074 		cleanup_done = commit ?
5075 			try_wait_for_completion(&commit->cleanup_done) : true;
5076 		spin_unlock(&crtc->commit_lock);
5077 
5078 		if (cleanup_done)
5079 			continue;
5080 
5081 		drm_crtc_wait_one_vblank(crtc);
5082 
5083 		return true;
5084 	}
5085 
5086 	return false;
5087 }
5088 
/*
 * Gate the pixel clock and disable the iCLKIP spread-spectrum modulator
 * via the sideband interface (SBI). sb_lock serializes SBI access.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5103 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* total divisor = (divsel + 2) * 64 + phaseinc, halved per auxdiv */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5179 
/*
 * Read back the currently programmed iCLKIP frequency in KHz.
 * Returns 0 if the pixel clock is gated or the modulator is disabled.
 * This is the inverse of the divisor computation in lpt_program_iclkip().
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5216 
/*
 * Copy the CPU transcoder's H/V timing registers into the corresponding
 * PCH transcoder registers so both sides run identical timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
5240 
/*
 * Toggle FDI B/C lane bifurcation (splitting FDI B's four lanes between
 * pipes B and C). May only be changed while both FDI B and C RX are
 * disabled, hence the WARNs.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	/* Nothing to do if already in the requested state. */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
5265 
5266 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5267 {
5268 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5269 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5270 
5271 	switch (crtc->pipe) {
5272 	case PIPE_A:
5273 		break;
5274 	case PIPE_B:
5275 		if (crtc_state->fdi_lanes > 2)
5276 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
5277 		else
5278 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
5279 
5280 		break;
5281 	case PIPE_C:
5282 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
5283 
5284 		break;
5285 	default:
5286 		BUG();
5287 	}
5288 }
5289 
5290 /*
5291  * Finds the encoder associated with the given CRTC. This can only be
5292  * used when we know that the CRTC isn't feeding multiple encoders!
5293  */
5294 static struct intel_encoder *
5295 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5296 			   const struct intel_crtc_state *crtc_state)
5297 {
5298 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5299 	const struct drm_connector_state *connector_state;
5300 	const struct drm_connector *connector;
5301 	struct intel_encoder *encoder = NULL;
5302 	int num_encoders = 0;
5303 	int i;
5304 
5305 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5306 		if (connector_state->crtc != &crtc->base)
5307 			continue;
5308 
5309 		encoder = to_intel_encoder(connector_state->best_encoder);
5310 		num_encoders++;
5311 	}
5312 
5313 	drm_WARN(encoder->base.dev, num_encoders != 1,
5314 		 "%d encoders for pipe %c\n",
5315 		 num_encoders, pipe_name(crtc->pipe));
5316 
5317 	return encoder;
5318 }
5319 
5320 /*
5321  * Enable PCH resources required for PCH ports:
5322  *   - PCH PLLs
5323  *   - FDI training & RX/TX
5324  *   - update transcoder timings
5325  *   - DP transcoding bits
5326  *   - transcoder
5327  */
5328 static void ilk_pch_enable(const struct intel_atomic_state *state,
5329 			   const struct intel_crtc_state *crtc_state)
5330 {
5331 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5332 	struct drm_device *dev = crtc->base.dev;
5333 	struct drm_i915_private *dev_priv = to_i915(dev);
5334 	enum pipe pipe = crtc->pipe;
5335 	u32 temp;
5336 
5337 	assert_pch_transcoder_disabled(dev_priv, pipe);
5338 
5339 	if (IS_IVYBRIDGE(dev_priv))
5340 		ivb_update_fdi_bc_bifurcation(crtc_state);
5341 
5342 	/* Write the TU size bits before fdi link training, so that error
5343 	 * detection works. */
5344 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
5345 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5346 
5347 	/* For PCH output, training FDI link */
5348 	dev_priv->display.fdi_link_train(crtc, crtc_state);
5349 
5350 	/* We need to program the right clock selection before writing the pixel
5351 	 * mutliplier into the DPLL. */
5352 	if (HAS_PCH_CPT(dev_priv)) {
5353 		u32 sel;
5354 
5355 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5356 		temp |= TRANS_DPLL_ENABLE(pipe);
5357 		sel = TRANS_DPLLB_SEL(pipe);
5358 		if (crtc_state->shared_dpll ==
5359 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5360 			temp |= sel;
5361 		else
5362 			temp &= ~sel;
5363 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
5364 	}
5365 
5366 	/* XXX: pch pll's can be enabled any time before we enable the PCH
5367 	 * transcoder, and we actually should do this to not upset any PCH
5368 	 * transcoder that already use the clock when we share it.
5369 	 *
5370 	 * Note that enable_shared_dpll tries to do the right thing, but
5371 	 * get_shared_dpll unconditionally resets the pll - we need that to have
5372 	 * the right LVDS enable sequence. */
5373 	intel_enable_shared_dpll(crtc_state);
5374 
5375 	/* set transcoder timing, panel must allow it */
5376 	assert_panel_unlocked(dev_priv, pipe);
5377 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
5378 
5379 	intel_fdi_normal_train(crtc);
5380 
5381 	/* For PCH DP, enable TRANS_DP_CTL */
5382 	if (HAS_PCH_CPT(dev_priv) &&
5383 	    intel_crtc_has_dp_encoder(crtc_state)) {
5384 		const struct drm_display_mode *adjusted_mode =
5385 			&crtc_state->hw.adjusted_mode;
5386 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5387 		i915_reg_t reg = TRANS_DP_CTL(pipe);
5388 		enum port port;
5389 
5390 		temp = intel_de_read(dev_priv, reg);
5391 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
5392 			  TRANS_DP_SYNC_MASK |
5393 			  TRANS_DP_BPC_MASK);
5394 		temp |= TRANS_DP_OUTPUT_ENABLE;
5395 		temp |= bpc << 9; /* same format but at 11:9 */
5396 
5397 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5398 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5399 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5400 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5401 
5402 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5403 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
5404 		temp |= TRANS_DP_PORT_SEL(port);
5405 
5406 		intel_de_write(dev_priv, reg, temp);
5407 	}
5408 
5409 	ilk_enable_pch_transcoder(crtc_state);
5410 }
5411 
/*
 * LPT variant of PCH enable: program iCLKIP, copy the timings to the
 * (single) PCH transcoder and enable it. LPT has only transcoder A.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5427 
/*
 * Sanity-check that the pipe is actually running after a modeset by
 * watching PIPEDSL (the scanline counter) advance; a stuck counter
 * means the pipe never started.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
5443 
5444 /*
5445  * The hardware phase 0.0 refers to the center of the pixel.
5446  * We want to start from the top/left edge which is phase
5447  * -0.5. That matches how the hardware calculates the scaling
5448  * factors (from top-left of the first pixel to bottom-right
5449  * of the last pixel, as opposed to the pixel centers).
5450  *
5451  * For 4:2:0 subsampled chroma planes we obviously have to
5452  * adjust that so that the chroma sample position lands in
5453  * the right spot.
5454  *
5455  * Note that for packed YCbCr 4:2:2 formats there is no way to
5456  * control chroma siting. The hardware simply replicates the
5457  * chroma samples for both of the luma samples, and thus we don't
5458  * actually get the expected MPEG2 chroma siting convention :(
5459  * The same behaviour is observed on pre-SKL platforms as well.
5460  *
5461  * Theory behind the formula (note that we ignore sub-pixel
5462  * source coordinates):
5463  * s = source sample position
5464  * d = destination sample position
5465  *
5466  * Downscaling 4:1:
5467  * -0.5
5468  * | 0.0
5469  * | |     1.5 (initial phase)
5470  * | |     |
5471  * v v     v
5472  * | s | s | s | s |
5473  * |       d       |
5474  *
5475  * Upscaling 1:4:
5476  * -0.5
5477  * | -0.375 (initial phase)
5478  * | |     0.0
5479  * | |     |
5480  * v v     v
5481  * |       s       |
5482  * | d | d | d | d |
5483  */
5484 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5485 {
5486 	int phase = -0x8000;
5487 	u16 trip = 0;
5488 
5489 	if (chroma_cosited)
5490 		phase += (sub - 1) * 0x8000 / sub;
5491 
5492 	phase += scale / (2 * sub);
5493 
5494 	/*
5495 	 * Hardware initial phase limited to [-0.5:1.5].
5496 	 * Since the max hardware scale factor is 3.0, we
5497 	 * should never actually excdeed 1.0 here.
5498 	 */
5499 	WARN_ON(phase < -0x8000 || phase > 0x18000);
5500 
5501 	if (phase < 0)
5502 		phase = 0x10000 + phase;
5503 	else
5504 		trip = PS_PHASE_TRIP;
5505 
5506 	return ((phase >> 2) & PS_PHASE_MASK) | trip;
5507 }
5508 
/* Pipe/plane scaler source and destination size limits, in pixels. */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
/* Gen11+ raises the maximum scaler width. */
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
/* Planar (4:2:0) YUV needs a larger minimum source size. */
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
5523 
/*
 * Stage the allocation or release of a pipe/plane scaler in the crtc
 * state for @scaler_user. Validates the requested src/dst sizes against
 * the platform limits; the actual scaler registers are programmed later
 * during plane/panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL if the requested scaling is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no longer required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar YUV has a larger minimum source size requirement. */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}
5618 
5619 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
5620 {
5621 	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
5622 	int width, height;
5623 
5624 	if (crtc_state->pch_pfit.enabled) {
5625 		width = drm_rect_width(&crtc_state->pch_pfit.dst);
5626 		height = drm_rect_height(&crtc_state->pch_pfit.dst);
5627 	} else {
5628 		width = pipe_mode->crtc_hdisplay;
5629 		height = pipe_mode->crtc_vdisplay;
5630 	}
5631 	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
5632 				 SKL_CRTC_INDEX,
5633 				 &crtc_state->scaler_state.scaler_id,
5634 				 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
5635 				 width, height, NULL, 0,
5636 				 crtc_state->pch_pfit.enabled);
5637 }
5638 
5639 /**
5640  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
5641  * @crtc_state: crtc's scaler state
5642  * @plane_state: atomic plane state to update
5643  *
5644  * Return
5645  *     0 - scaler_usage updated successfully
5646  *    error - requested scaling cannot be supported or other error condition
5647  */
5648 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5649 				   struct intel_plane_state *plane_state)
5650 {
5651 	struct intel_plane *intel_plane =
5652 		to_intel_plane(plane_state->uapi.plane);
5653 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5654 	struct drm_framebuffer *fb = plane_state->hw.fb;
5655 	int ret;
5656 	bool force_detach = !fb || !plane_state->uapi.visible;
5657 	bool need_scaler = false;
5658 
5659 	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5660 	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5661 	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
5662 		need_scaler = true;
5663 
5664 	ret = skl_update_scaler(crtc_state, force_detach,
5665 				drm_plane_index(&intel_plane->base),
5666 				&plane_state->scaler_id,
5667 				drm_rect_width(&plane_state->uapi.src) >> 16,
5668 				drm_rect_height(&plane_state->uapi.src) >> 16,
5669 				drm_rect_width(&plane_state->uapi.dst),
5670 				drm_rect_height(&plane_state->uapi.dst),
5671 				fb ? fb->format : NULL,
5672 				fb ? fb->modifier : 0,
5673 				need_scaler);
5674 
5675 	if (ret || plane_state->scaler_id < 0)
5676 		return ret;
5677 
5678 	/* check colorkey */
5679 	if (plane_state->ckey.flags) {
5680 		drm_dbg_kms(&dev_priv->drm,
5681 			    "[PLANE:%d:%s] scaling with color key not allowed",
5682 			    intel_plane->base.base.id,
5683 			    intel_plane->base.name);
5684 		return -EINVAL;
5685 	}
5686 
5687 	/* Check src format */
5688 	switch (fb->format->format) {
5689 	case DRM_FORMAT_RGB565:
5690 	case DRM_FORMAT_XBGR8888:
5691 	case DRM_FORMAT_XRGB8888:
5692 	case DRM_FORMAT_ABGR8888:
5693 	case DRM_FORMAT_ARGB8888:
5694 	case DRM_FORMAT_XRGB2101010:
5695 	case DRM_FORMAT_XBGR2101010:
5696 	case DRM_FORMAT_ARGB2101010:
5697 	case DRM_FORMAT_ABGR2101010:
5698 	case DRM_FORMAT_YUYV:
5699 	case DRM_FORMAT_YVYU:
5700 	case DRM_FORMAT_UYVY:
5701 	case DRM_FORMAT_VYUY:
5702 	case DRM_FORMAT_NV12:
5703 	case DRM_FORMAT_XYUV8888:
5704 	case DRM_FORMAT_P010:
5705 	case DRM_FORMAT_P012:
5706 	case DRM_FORMAT_P016:
5707 	case DRM_FORMAT_Y210:
5708 	case DRM_FORMAT_Y212:
5709 	case DRM_FORMAT_Y216:
5710 	case DRM_FORMAT_XVYU2101010:
5711 	case DRM_FORMAT_XVYU12_16161616:
5712 	case DRM_FORMAT_XVYU16161616:
5713 		break;
5714 	case DRM_FORMAT_XBGR16161616F:
5715 	case DRM_FORMAT_ABGR16161616F:
5716 	case DRM_FORMAT_XRGB16161616F:
5717 	case DRM_FORMAT_ARGB16161616F:
5718 		if (INTEL_GEN(dev_priv) >= 11)
5719 			break;
5720 		fallthrough;
5721 	default:
5722 		drm_dbg_kms(&dev_priv->drm,
5723 			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5724 			    intel_plane->base.base.id, intel_plane->base.name,
5725 			    fb->base.id, fb->format->format);
5726 		return -EINVAL;
5727 	}
5728 
5729 	return 0;
5730 }
5731 
5732 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
5733 {
5734 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
5735 	int i;
5736 
5737 	for (i = 0; i < crtc->num_scalers; i++)
5738 		skl_detach_scaler(crtc, i);
5739 }
5740 
/* Map a linear coefficient index to its filter tap position (0-6). */
static int cnl_coef_tap(int i)
{
	int tap = i % 7;

	return tap;
}
5745 
5746 static u16 cnl_nearest_filter_coef(int t)
5747 {
5748 	return t == 3 ? 0x0800 : 0x3000;
5749 }
5750 
5751 /*
5752  *  Theory behind setting nearest-neighbor integer scaling:
5753  *
5754  *  17 phase of 7 taps requires 119 coefficients in 60 dwords per set.
5755  *  The letter represents the filter tap (D is the center tap) and the number
5756  *  represents the coefficient set for a phase (0-16).
5757  *
5758  *         +------------+------------------------+------------------------+
5759  *         |Index value | Data value coeffient 1 | Data value coeffient 2 |
5760  *         +------------+------------------------+------------------------+
5761  *         |   00h      |          B0            |          A0            |
5762  *         +------------+------------------------+------------------------+
5763  *         |   01h      |          D0            |          C0            |
5764  *         +------------+------------------------+------------------------+
5765  *         |   02h      |          F0            |          E0            |
5766  *         +------------+------------------------+------------------------+
5767  *         |   03h      |          A1            |          G0            |
5768  *         +------------+------------------------+------------------------+
5769  *         |   04h      |          C1            |          B1            |
5770  *         +------------+------------------------+------------------------+
5771  *         |   ...      |          ...           |          ...           |
5772  *         +------------+------------------------+------------------------+
5773  *         |   38h      |          B16           |          A16           |
5774  *         +------------+------------------------+------------------------+
5775  *         |   39h      |          D16           |          C16           |
5776  *         +------------+------------------------+------------------------+
5777  *         |   3Ah      |          F16           |          C16           |
5778  *         +------------+------------------------+------------------------+
5779  *         |   3Bh      |        Reserved        |          G16           |
5780  *         +------------+------------------------+------------------------+
5781  *
5782  *  To enable nearest-neighbor scaling:  program scaler coefficents with
5783  *  the center tap (Dxx) values set to 1 and all other values set to 0 as per
5784  *  SCALER_COEFFICIENT_FORMAT
5785  *
5786  */
5787 
/*
 * Program coefficient set @set of scaler @id on @pipe for nearest-neighbor
 * filtering: every tap zeroed except the center tap, per the table above.
 * Caller is expected to hold the uncore lock (callers use the _fw variants).
 */
static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
					     enum pipe pipe, int id, int set)
{
	int i;

	/* Enable index auto-increment so back-to-back data writes fill the table. */
	intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
			  PS_COEE_INDEX_AUTO_INC);

	/* 17 phases * 7 taps = 119 coefficients, packed two per dword. */
	for (i = 0; i < 17 * 7; i += 2) {
		u32 tmp;
		int t;

		t = cnl_coef_tap(i);
		tmp = cnl_nearest_filter_coef(t);

		/* second coefficient of the pair goes in the high 16 bits */
		t = cnl_coef_tap(i + 1);
		tmp |= cnl_nearest_filter_coef(t) << 16;

		intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
				  tmp);
	}

	/* Clear auto-increment and reset the index back to 0. */
	intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
5812 
5813 inline u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
5814 {
5815 	if (filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR) {
5816 		return (PS_FILTER_PROGRAMMED |
5817 			PS_Y_VERT_FILTER_SELECT(set) |
5818 			PS_Y_HORZ_FILTER_SELECT(set) |
5819 			PS_UV_VERT_FILTER_SELECT(set) |
5820 			PS_UV_HORZ_FILTER_SELECT(set));
5821 	}
5822 
5823 	return PS_FILTER_MEDIUM;
5824 }
5825 
5826 void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
5827 			     int id, int set, enum drm_scaling_filter filter)
5828 {
5829 	switch (filter) {
5830 	case DRM_SCALING_FILTER_DEFAULT:
5831 		break;
5832 	case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
5833 		cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
5834 		break;
5835 	default:
5836 		MISSING_CASE(filter);
5837 	}
5838 }
5839 
/*
 * Enable the SKL+ panel fitter: program the pipe scaler assigned to the
 * crtc to scale from the pipe source size to the pch_pfit destination
 * window. All register writes are done under the uncore lock using the
 * _fw accessors.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	/* Source rect in 16.16 fixed point, as expected by drm_rect scaling. */
	struct drm_rect src = {
		.x2 = crtc_state->pipe_src_w << 16,
		.y2 = crtc_state->pipe_src_h << 16,
	};
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	unsigned long irqflags;
	int id;
	u32 ps_ctrl;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* A scaler must have been assigned during atomic check. */
	if (drm_WARN_ON(&dev_priv->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
	ps_ctrl |=  PS_SCALER_EN | scaler_state->scalers[id].mode;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Load filter coefficients (if any) before enabling the scaler. */
	skl_scaler_setup_filter(dev_priv, pipe, id, 0,
				crtc_state->hw.scaling_filter);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);

	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
			  x << 16 | y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			  width << 16 | height);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
5898 
5899 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
5900 {
5901 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5902 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5903 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
5904 	enum pipe pipe = crtc->pipe;
5905 	int width = drm_rect_width(dst);
5906 	int height = drm_rect_height(dst);
5907 	int x = dst->x1;
5908 	int y = dst->y1;
5909 
5910 	if (!crtc_state->pch_pfit.enabled)
5911 		return;
5912 
5913 	/* Force use of hard-coded filter coefficients
5914 	 * as some pre-programmed values are broken,
5915 	 * e.g. x201.
5916 	 */
5917 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5918 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
5919 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
5920 	else
5921 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
5922 			       PF_FILTER_MED_3x3);
5923 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
5924 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
5925 }
5926 
/*
 * Enable IPS (Intermediate Pixel Storage) for the crtc. On Broadwell this
 * goes through the pcode mailbox, on Haswell directly via IPS_CTL. Must be
 * called after a plane has been enabled and a vblank has passed.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	/* Sanity check: some non-cursor plane must be active. */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
5963 
/*
 * Disable IPS for the crtc: via the pcode mailbox on Broadwell, directly
 * via IPS_CTL otherwise. Ends with a vblank wait so planes can be safely
 * disabled afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		/* posting read flushes the write before we wait on vblank */
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5992 
5993 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5994 {
5995 	if (intel_crtc->overlay)
5996 		(void) intel_overlay_switch_off(intel_crtc->overlay);
5997 
5998 	/* Let userspace switch the overlay on again. In most cases userspace
5999 	 * has to recompute where to put it anyway.
6000 	 */
6001 }
6002 
6003 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6004 				       const struct intel_crtc_state *new_crtc_state)
6005 {
6006 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6007 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6008 
6009 	if (!old_crtc_state->ips_enabled)
6010 		return false;
6011 
6012 	if (intel_crtc_needs_modeset(new_crtc_state))
6013 		return true;
6014 
6015 	/*
6016 	 * Workaround : Do not read or write the pipe palette/gamma data while
6017 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6018 	 *
6019 	 * Disable IPS before we program the LUT.
6020 	 */
6021 	if (IS_HASWELL(dev_priv) &&
6022 	    (new_crtc_state->uapi.color_mgmt_changed ||
6023 	     new_crtc_state->update_pipe) &&
6024 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6025 		return true;
6026 
6027 	return !new_crtc_state->ips_enabled;
6028 }
6029 
6030 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6031 				       const struct intel_crtc_state *new_crtc_state)
6032 {
6033 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6034 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6035 
6036 	if (!new_crtc_state->ips_enabled)
6037 		return false;
6038 
6039 	if (intel_crtc_needs_modeset(new_crtc_state))
6040 		return true;
6041 
6042 	/*
6043 	 * Workaround : Do not read or write the pipe palette/gamma data while
6044 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6045 	 *
6046 	 * Re-enable IPS after the LUT has been programmed.
6047 	 */
6048 	if (IS_HASWELL(dev_priv) &&
6049 	    (new_crtc_state->uapi.color_mgmt_changed ||
6050 	     new_crtc_state->update_pipe) &&
6051 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6052 		return true;
6053 
6054 	/*
6055 	 * We can't read out IPS on broadwell, assume the worst and
6056 	 * forcibly enable IPS on the first fastset.
6057 	 */
6058 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
6059 		return true;
6060 
6061 	return !old_crtc_state->ips_enabled;
6062 }
6063 
6064 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6065 {
6066 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6067 
6068 	if (!crtc_state->nv12_planes)
6069 		return false;
6070 
6071 	/* WA Display #0827: Gen9:all */
6072 	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6073 		return true;
6074 
6075 	return false;
6076 }
6077 
6078 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6079 {
6080 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6081 
6082 	/* Wa_2006604312:icl,ehl */
6083 	if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
6084 		return true;
6085 
6086 	return false;
6087 }
6088 
6089 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6090 			    const struct intel_crtc_state *new_crtc_state)
6091 {
6092 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
6093 		new_crtc_state->active_planes;
6094 }
6095 
6096 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6097 			     const struct intel_crtc_state *new_crtc_state)
6098 {
6099 	return old_crtc_state->active_planes &&
6100 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
6101 }
6102 
/*
 * Per-crtc work that must run after the plane update has been committed:
 * frontbuffer flip notification, post-update watermarks, IPS re-enable,
 * FBC, and tearing down workarounds that are no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	/* IPS may only be enabled after planes are on and a vblank passed. */
	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* WA Display #0827: disable once no longer needed */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl: disable once no longer needed */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6131 
/*
 * WA for gen9/10 where the async flip enable bit is double buffered and
 * only latched at the start of vblank: clear PLANE_CTL_ASYNC_FLIP on every
 * updated plane of @crtc and rewrite PLANE_SURF to arm the change, then
 * wait a vblank so the cleared bit has actually latched.
 */
static void skl_disable_async_flip_wa(struct intel_atomic_state *state,
				      struct intel_crtc *crtc,
				      const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *new_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		u32 update_mask = new_crtc_state->update_planes;
		u32 plane_ctl, surf_addr;
		enum plane_id plane_id;
		unsigned long irqflags;
		enum pipe pipe;

		/* only planes of this crtc that are part of the update */
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		plane_id = plane->id;
		pipe = plane->pipe;

		/* read-modify-write of the plane registers under the uncore lock */
		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
		plane_ctl = intel_de_read_fw(dev_priv, PLANE_CTL(pipe, plane_id));
		surf_addr = intel_de_read_fw(dev_priv, PLANE_SURF(pipe, plane_id));

		plane_ctl &= ~PLANE_CTL_ASYNC_FLIP;

		intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
		/* writing PLANE_SURF arms the PLANE_CTL update */
		intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	}

	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6168 
/*
 * Per-crtc work that must run before the plane update is committed:
 * IPS disable, FBC preparation, enabling workarounds the new state needs,
 * cxsr/LP watermark handling, intermediate watermark programming, and
 * underrun-reporting / async-flip workarounds. The ordering of these steps
 * is significant; see the individual comments.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip &&
	    !new_crtc_state->uapi.async_flip &&
	    IS_GEN_RANGE(dev_priv, 9, 10))
		skl_disable_async_flip_wa(state, crtc, new_crtc_state);
}
6264 
6265 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6266 				      struct intel_crtc *crtc)
6267 {
6268 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6269 	const struct intel_crtc_state *new_crtc_state =
6270 		intel_atomic_get_new_crtc_state(state, crtc);
6271 	unsigned int update_mask = new_crtc_state->update_planes;
6272 	const struct intel_plane_state *old_plane_state;
6273 	struct intel_plane *plane;
6274 	unsigned fb_bits = 0;
6275 	int i;
6276 
6277 	intel_crtc_dpms_overlay_disable(crtc);
6278 
6279 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6280 		if (crtc->pipe != plane->pipe ||
6281 		    !(update_mask & BIT(plane->id)))
6282 			continue;
6283 
6284 		intel_disable_plane(plane, new_crtc_state);
6285 
6286 		if (old_plane_state->uapi.visible)
6287 			fb_bits |= plane->frontbuffer_bit;
6288 	}
6289 
6290 	intel_frontbuffer_flip(dev_priv, fb_bits);
6291 }
6292 
6293 /*
6294  * intel_connector_primary_encoder - get the primary encoder for a connector
6295  * @connector: connector for which to return the encoder
6296  *
6297  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6298  * all connectors to their encoder, except for DP-MST connectors which have
6299  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6300  * pointed to by as many DP-MST connectors as there are pipes.
6301  */
6302 static struct intel_encoder *
6303 intel_connector_primary_encoder(struct intel_connector *connector)
6304 {
6305 	struct intel_encoder *encoder;
6306 
6307 	if (connector->mst_port)
6308 		return &dp_to_dig_port(connector->mst_port)->base;
6309 
6310 	encoder = intel_attached_encoder(connector);
6311 	drm_WARN_ON(connector->base.dev, !encoder);
6312 
6313 	return encoder;
6314 }
6315 
6316 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6317 {
6318 	struct drm_connector_state *new_conn_state;
6319 	struct drm_connector *connector;
6320 	int i;
6321 
6322 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6323 					i) {
6324 		struct intel_connector *intel_connector;
6325 		struct intel_encoder *encoder;
6326 		struct intel_crtc *crtc;
6327 
6328 		if (!intel_connector_needs_modeset(state, connector))
6329 			continue;
6330 
6331 		intel_connector = to_intel_connector(connector);
6332 		encoder = intel_connector_primary_encoder(intel_connector);
6333 		if (!encoder->update_prepare)
6334 			continue;
6335 
6336 		crtc = new_conn_state->crtc ?
6337 			to_intel_crtc(new_conn_state->crtc) : NULL;
6338 		encoder->update_prepare(state, encoder, crtc);
6339 	}
6340 }
6341 
6342 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6343 {
6344 	struct drm_connector_state *new_conn_state;
6345 	struct drm_connector *connector;
6346 	int i;
6347 
6348 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6349 					i) {
6350 		struct intel_connector *intel_connector;
6351 		struct intel_encoder *encoder;
6352 		struct intel_crtc *crtc;
6353 
6354 		if (!intel_connector_needs_modeset(state, connector))
6355 			continue;
6356 
6357 		intel_connector = to_intel_connector(connector);
6358 		encoder = intel_connector_primary_encoder(intel_connector);
6359 		if (!encoder->update_complete)
6360 			continue;
6361 
6362 		crtc = new_conn_state->crtc ?
6363 			to_intel_crtc(new_conn_state->crtc) : NULL;
6364 		encoder->update_complete(state, encoder, crtc);
6365 	}
6366 }
6367 
6368 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6369 					  struct intel_crtc *crtc)
6370 {
6371 	const struct intel_crtc_state *crtc_state =
6372 		intel_atomic_get_new_crtc_state(state, crtc);
6373 	const struct drm_connector_state *conn_state;
6374 	struct drm_connector *conn;
6375 	int i;
6376 
6377 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6378 		struct intel_encoder *encoder =
6379 			to_intel_encoder(conn_state->best_encoder);
6380 
6381 		if (conn_state->crtc != &crtc->base)
6382 			continue;
6383 
6384 		if (encoder->pre_pll_enable)
6385 			encoder->pre_pll_enable(state, encoder,
6386 						crtc_state, conn_state);
6387 	}
6388 }
6389 
6390 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6391 				      struct intel_crtc *crtc)
6392 {
6393 	const struct intel_crtc_state *crtc_state =
6394 		intel_atomic_get_new_crtc_state(state, crtc);
6395 	const struct drm_connector_state *conn_state;
6396 	struct drm_connector *conn;
6397 	int i;
6398 
6399 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6400 		struct intel_encoder *encoder =
6401 			to_intel_encoder(conn_state->best_encoder);
6402 
6403 		if (conn_state->crtc != &crtc->base)
6404 			continue;
6405 
6406 		if (encoder->pre_enable)
6407 			encoder->pre_enable(state, encoder,
6408 					    crtc_state, conn_state);
6409 	}
6410 }
6411 
6412 static void intel_encoders_enable(struct intel_atomic_state *state,
6413 				  struct intel_crtc *crtc)
6414 {
6415 	const struct intel_crtc_state *crtc_state =
6416 		intel_atomic_get_new_crtc_state(state, crtc);
6417 	const struct drm_connector_state *conn_state;
6418 	struct drm_connector *conn;
6419 	int i;
6420 
6421 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6422 		struct intel_encoder *encoder =
6423 			to_intel_encoder(conn_state->best_encoder);
6424 
6425 		if (conn_state->crtc != &crtc->base)
6426 			continue;
6427 
6428 		if (encoder->enable)
6429 			encoder->enable(state, encoder,
6430 					crtc_state, conn_state);
6431 		intel_opregion_notify_encoder(encoder, true);
6432 	}
6433 }
6434 
6435 static void intel_encoders_disable(struct intel_atomic_state *state,
6436 				   struct intel_crtc *crtc)
6437 {
6438 	const struct intel_crtc_state *old_crtc_state =
6439 		intel_atomic_get_old_crtc_state(state, crtc);
6440 	const struct drm_connector_state *old_conn_state;
6441 	struct drm_connector *conn;
6442 	int i;
6443 
6444 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6445 		struct intel_encoder *encoder =
6446 			to_intel_encoder(old_conn_state->best_encoder);
6447 
6448 		if (old_conn_state->crtc != &crtc->base)
6449 			continue;
6450 
6451 		intel_opregion_notify_encoder(encoder, false);
6452 		if (encoder->disable)
6453 			encoder->disable(state, encoder,
6454 					 old_crtc_state, old_conn_state);
6455 	}
6456 }
6457 
6458 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6459 					struct intel_crtc *crtc)
6460 {
6461 	const struct intel_crtc_state *old_crtc_state =
6462 		intel_atomic_get_old_crtc_state(state, crtc);
6463 	const struct drm_connector_state *old_conn_state;
6464 	struct drm_connector *conn;
6465 	int i;
6466 
6467 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6468 		struct intel_encoder *encoder =
6469 			to_intel_encoder(old_conn_state->best_encoder);
6470 
6471 		if (old_conn_state->crtc != &crtc->base)
6472 			continue;
6473 
6474 		if (encoder->post_disable)
6475 			encoder->post_disable(state, encoder,
6476 					      old_crtc_state, old_conn_state);
6477 	}
6478 }
6479 
6480 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6481 					    struct intel_crtc *crtc)
6482 {
6483 	const struct intel_crtc_state *old_crtc_state =
6484 		intel_atomic_get_old_crtc_state(state, crtc);
6485 	const struct drm_connector_state *old_conn_state;
6486 	struct drm_connector *conn;
6487 	int i;
6488 
6489 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6490 		struct intel_encoder *encoder =
6491 			to_intel_encoder(old_conn_state->best_encoder);
6492 
6493 		if (old_conn_state->crtc != &crtc->base)
6494 			continue;
6495 
6496 		if (encoder->post_pll_disable)
6497 			encoder->post_pll_disable(state, encoder,
6498 						  old_crtc_state, old_conn_state);
6499 	}
6500 }
6501 
6502 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6503 				       struct intel_crtc *crtc)
6504 {
6505 	const struct intel_crtc_state *crtc_state =
6506 		intel_atomic_get_new_crtc_state(state, crtc);
6507 	const struct drm_connector_state *conn_state;
6508 	struct drm_connector *conn;
6509 	int i;
6510 
6511 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6512 		struct intel_encoder *encoder =
6513 			to_intel_encoder(conn_state->best_encoder);
6514 
6515 		if (conn_state->crtc != &crtc->base)
6516 			continue;
6517 
6518 		if (encoder->update_pipe)
6519 			encoder->update_pipe(state, encoder,
6520 					     crtc_state, conn_state);
6521 	}
6522 }
6523 
6524 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6525 {
6526 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6527 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6528 
6529 	plane->disable_plane(plane, crtc_state);
6530 }
6531 
/*
 * Enable sequence for a pipe on ILK-IVB (PCH split) platforms: program
 * the transcoder timings/M-N/pipeconf, run the encoder pre-enable hooks,
 * enable the FDI PLL and PCH path when a PCH encoder is present, load
 * the LUTs, enable the pipe and finally the encoders. CPU/PCH FIFO
 * underrun reporting is suppressed around the noisy steps.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The pipe must not already be running. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N values are programmed into the CPU transcoder. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		/* Without a PCH encoder, both FDI sides must stay off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	/* The PCH transcoder is enabled only after the CPU pipe. */
	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6623 
6624 /* IPS only exists on ULT machines and is tied to pipe A. */
6625 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6626 {
6627 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6628 }
6629 
6630 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6631 					    enum pipe pipe, bool apply)
6632 {
6633 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
6634 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6635 
6636 	if (apply)
6637 		val |= mask;
6638 	else
6639 		val &= ~mask;
6640 
6641 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
6642 }
6643 
6644 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6645 {
6646 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6647 	enum pipe pipe = crtc->pipe;
6648 	u32 val;
6649 
6650 	val = MBUS_DBOX_A_CREDIT(2);
6651 
6652 	if (INTEL_GEN(dev_priv) >= 12) {
6653 		val |= MBUS_DBOX_BW_CREDIT(2);
6654 		val |= MBUS_DBOX_B_CREDIT(12);
6655 	} else {
6656 		val |= MBUS_DBOX_BW_CREDIT(1);
6657 		val |= MBUS_DBOX_B_CREDIT(8);
6658 	}
6659 
6660 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
6661 }
6662 
6663 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
6664 {
6665 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6666 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6667 
6668 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
6669 		       HSW_LINETIME(crtc_state->linetime) |
6670 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
6671 }
6672 
6673 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6674 {
6675 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6676 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6677 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6678 	u32 val;
6679 
6680 	val = intel_de_read(dev_priv, reg);
6681 	val &= ~HSW_FRAME_START_DELAY_MASK;
6682 	val |= HSW_FRAME_START_DELAY(0);
6683 	intel_de_write(dev_priv, reg, val);
6684 }
6685 
/*
 * Bigjoiner pre-enable: locate the encoder attached to the master crtc
 * and enable VDSC. For the slave crtc this also runs the master's
 * pre-pll/pll/pre-enable sequence first (steps 1-7).
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* For a slave crtc, the linked crtc is the bigjoiner master. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Find the encoder driven by the master crtc in the new state. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}
}
6724 
/*
 * Enable sequence for a pipe on HSW and later (DDI) platforms,
 * including gen11+ bigjoiner configurations where a master and a slave
 * pipe jointly drive one port.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* The pipe must not already be running. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/* Bigjoiner crtcs have their own PLL/encoder pre-enable sequence. */
	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder programming is done only by the owner of the transcoder. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/* Only the bigjoiner slave turns on its vblank handling here. */
	if (new_crtc_state->bigjoiner_slave) {
		trace_intel_pipe_enable(crtc);
		intel_crtc_vblank_on(new_crtc_state);
	}

	intel_encoders_enable(state, crtc);

	/* Undo WA #1180 after one vblank with the scaler active. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
6824 
6825 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6826 {
6827 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6828 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6829 	enum pipe pipe = crtc->pipe;
6830 
6831 	/* To avoid upsetting the power well on haswell only disable the pfit if
6832 	 * it's in use. The hw state code will make sure we get this right. */
6833 	if (!old_crtc_state->pch_pfit.enabled)
6834 		return;
6835 
6836 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
6837 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
6838 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
6839 }
6840 
/*
 * Disable sequence for a pipe on ILK-IVB (PCH split) platforms:
 * encoders, vblank, pipe, pfit, FDI, then (if present) the PCH
 * transcoder, its routing registers and the FDI PLL, with FIFO
 * underrun reporting suppressed for the duration.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6897 
/*
 * Disable sequence for a pipe on HSW+ (DDI) platforms. Only the
 * encoder disable/post-disable hooks are called from here; the rest
 * of the teardown happens inside those hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
6908 
6909 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6910 {
6911 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6912 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6913 
6914 	if (!crtc_state->gmch_pfit.control)
6915 		return;
6916 
6917 	/*
6918 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
6919 	 * according to register description and PRM.
6920 	 */
6921 	drm_WARN_ON(&dev_priv->drm,
6922 		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
6923 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
6924 
6925 	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
6926 		       crtc_state->gmch_pfit.pgm_ratios);
6927 	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
6928 
6929 	/* Border color in case we don't scale up to the full screen. Black by
6930 	 * default, change to something else for debugging. */
6931 	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
6932 }
6933 
6934 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6935 {
6936 	if (phy == PHY_NONE)
6937 		return false;
6938 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
6939 		return phy <= PHY_D;
6940 	else if (IS_JSL_EHL(dev_priv))
6941 		return phy <= PHY_C;
6942 	else if (INTEL_GEN(dev_priv) >= 11)
6943 		return phy <= PHY_B;
6944 	else
6945 		return false;
6946 }
6947 
6948 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6949 {
6950 	if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
6951 		return false;
6952 	else if (INTEL_GEN(dev_priv) >= 12)
6953 		return phy >= PHY_D && phy <= PHY_I;
6954 	else if (INTEL_GEN(dev_priv) >= 11 && !IS_JSL_EHL(dev_priv))
6955 		return phy >= PHY_C && phy <= PHY_F;
6956 	else
6957 		return false;
6958 }
6959 
6960 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6961 {
6962 	if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
6963 		return PHY_C + port - PORT_TC1;
6964 	else if (IS_JSL_EHL(i915) && port == PORT_D)
6965 		return PHY_A;
6966 
6967 	return PHY_A + port - PORT_A;
6968 }
6969 
6970 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6971 {
6972 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6973 		return TC_PORT_NONE;
6974 
6975 	if (INTEL_GEN(dev_priv) >= 12)
6976 		return TC_PORT_1 + port - PORT_TC1;
6977 	else
6978 		return TC_PORT_1 + port - PORT_C;
6979 }
6980 
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are flagged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
7007 
/*
 * Map a digital port's AUX channel to its power domain. Type-C ports
 * in TBT-alt mode use the dedicated *_TBT AUX domains; everything else
 * goes through intel_legacy_aux_to_power_domain().
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
7039 
7040 /*
7041  * Converts aux_ch to power_domain without caring about TBT ports for that use
7042  * intel_aux_power_domain()
7043  */
7044 enum intel_display_power_domain
7045 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
7046 {
7047 	switch (aux_ch) {
7048 	case AUX_CH_A:
7049 		return POWER_DOMAIN_AUX_A;
7050 	case AUX_CH_B:
7051 		return POWER_DOMAIN_AUX_B;
7052 	case AUX_CH_C:
7053 		return POWER_DOMAIN_AUX_C;
7054 	case AUX_CH_D:
7055 		return POWER_DOMAIN_AUX_D;
7056 	case AUX_CH_E:
7057 		return POWER_DOMAIN_AUX_E;
7058 	case AUX_CH_F:
7059 		return POWER_DOMAIN_AUX_F;
7060 	case AUX_CH_G:
7061 		return POWER_DOMAIN_AUX_G;
7062 	case AUX_CH_H:
7063 		return POWER_DOMAIN_AUX_H;
7064 	case AUX_CH_I:
7065 		return POWER_DOMAIN_AUX_I;
7066 	default:
7067 		MISSING_CASE(aux_ch);
7068 		return POWER_DOMAIN_AUX_A;
7069 	}
7070 }
7071 
7072 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7073 {
7074 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7075 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7076 	struct drm_encoder *encoder;
7077 	enum pipe pipe = crtc->pipe;
7078 	u64 mask;
7079 	enum transcoder transcoder = crtc_state->cpu_transcoder;
7080 
7081 	if (!crtc_state->hw.active)
7082 		return 0;
7083 
7084 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7085 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7086 	if (crtc_state->pch_pfit.enabled ||
7087 	    crtc_state->pch_pfit.force_thru)
7088 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7089 
7090 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7091 				  crtc_state->uapi.encoder_mask) {
7092 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7093 
7094 		mask |= BIT_ULL(intel_encoder->power_domain);
7095 	}
7096 
7097 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7098 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7099 
7100 	if (crtc_state->shared_dpll)
7101 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7102 
7103 	if (crtc_state->dsc.compression_enable)
7104 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
7105 
7106 	return mask;
7107 }
7108 
7109 static u64
7110 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7111 {
7112 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7113 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7114 	enum intel_display_power_domain domain;
7115 	u64 domains, new_domains, old_domains;
7116 
7117 	domains = get_crtc_power_domains(crtc_state);
7118 
7119 	new_domains = domains & ~crtc->enabled_power_domains.mask;
7120 	old_domains = crtc->enabled_power_domains.mask & ~domains;
7121 
7122 	for_each_power_domain(domain, new_domains)
7123 		intel_display_power_get_in_set(dev_priv,
7124 					       &crtc->enabled_power_domains,
7125 					       domain);
7126 
7127 	return old_domains;
7128 }
7129 
7130 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
7131 					   u64 domains)
7132 {
7133 	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
7134 					    &crtc->enabled_power_domains,
7135 					    domains);
7136 }
7137 
/*
 * Enable sequence for a pipe on VLV/CHV: program timings/pipeconf,
 * bring up the DPIO PLL, pfit and LUTs, enable the pipe and finally
 * the encoders.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The pipe must not already be running. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/*
	 * NOTE(review): called without the NULL check used elsewhere;
	 * presumably vlv/chv always provide initial_watermarks - confirm.
	 */
	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7192 
7193 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7194 {
7195 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7196 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7197 
7198 	intel_de_write(dev_priv, FP0(crtc->pipe),
7199 		       crtc_state->dpll_hw_state.fp0);
7200 	intel_de_write(dev_priv, FP1(crtc->pipe),
7201 		       crtc_state->dpll_hw_state.fp1);
7202 }
7203 
/*
 * Enable sequence for a pipe on pre-VLV gmch platforms: PLL dividers,
 * timings/pipeconf, PLL, pfit and LUTs, then the pipe and encoders.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The pipe must not already be running. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}
7255 
7256 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7257 {
7258 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7259 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7260 
7261 	if (!old_crtc_state->gmch_pfit.control)
7262 		return;
7263 
7264 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
7265 
7266 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
7267 		    intel_de_read(dev_priv, PFIT_CONTROL));
7268 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
7269 }
7270 
/*
 * Disable sequence for a pipe on pre-VLV/VLV/CHV gmch platforms:
 * encoders, vblank, pipe, pfit, then the PLL (except for DSI, whose
 * PLL is managed elsewhere).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps its PLL running; everything else shuts it down here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7317 
/*
 * Force-disable a crtc outside of a normal atomic commit (used during
 * initial hw state sanitization). Disables the planes and the crtc via
 * a throwaway atomic state, then scrubs all software state tracking
 * (crtc state, cdclk/dbuf/bw bookkeeping, power domains) to match.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	/* Nothing to do if the crtc is already off. */
	if (!crtc_state->hw.active)
		return;

	/* Turn off all planes before touching the crtc itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Reset the uapi and hw crtc state to "fully off". */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Scrub the per-pipe cdclk/dbuf/bandwidth bookkeeping. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7402 
7403 /*
7404  * turn all crtc's off, but do not adjust state
7405  * This has to be paired with a call to intel_modeset_setup_hw_state.
7406  */
7407 int intel_display_suspend(struct drm_device *dev)
7408 {
7409 	struct drm_i915_private *dev_priv = to_i915(dev);
7410 	struct drm_atomic_state *state;
7411 	int ret;
7412 
7413 	state = drm_atomic_helper_suspend(dev);
7414 	ret = PTR_ERR_OR_ZERO(state);
7415 	if (ret)
7416 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7417 			ret);
7418 	else
7419 		dev_priv->modeset_restore_state = state;
7420 	return ret;
7421 }
7422 
/* Release the drm core encoder state, then free the wrapping object. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
7430 
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		/* Connector reports enabled: it must have a crtc, and an
		 * active one. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are bound to the crtc dynamically; skip the
		 * encoder/crtc cross checks for them. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Connector reports disabled: no active crtc or dangling
		 * encoder may remain. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7469 
7470 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7471 {
7472 	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7473 		return crtc_state->fdi_lanes;
7474 
7475 	return 0;
7476 }
7477 
/*
 * Validate the FDI lane count requested for @pipe, taking into account
 * the per-platform limits and, on 3-pipe Ivybridge, the lane sharing
 * between pipes B and C.
 *
 * Returns 0 if the config is valid, -EINVAL if not, or -EDEADLK if
 * grabbing the other pipe's crtc state hit an atomic locking conflict
 * (the caller must propagate that for backoff/retry).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum on any platform. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW (LPT FDI) only have 2 lanes. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		/* Pipe A has its own dedicated lanes. */
		return 0;
	case PIPE_B:
		/* Pipe B may use >2 lanes only if pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is capped at 2 lanes, and only gets those if
		 * pipe B isn't already using more than 2. */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7555 
/* Returned (instead of 0) when the config was only made valid by lowering
 * pipe_bpp; tells the caller the state needs to be recomputed. */
#define RETRY 1
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* -EDEADLK means atomic lock contention; must be propagated
	 * untouched so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	/* If the lane config is invalid, try again with a lower bpp
	 * (down to a floor of 6 bpc) to reduce the required bandwidth. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7606 
7607 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7608 {
7609 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7610 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7611 
7612 	/* IPS only exists on ULT machines and is tied to pipe A. */
7613 	if (!hsw_crtc_supports_ips(crtc))
7614 		return false;
7615 
7616 	if (!dev_priv->params.enable_ips)
7617 		return false;
7618 
7619 	if (crtc_state->pipe_bpp > 24)
7620 		return false;
7621 
7622 	/*
7623 	 * We compare against max which means we must take
7624 	 * the increased cdclk requirement into account when
7625 	 * calculating the new cdclk.
7626 	 *
7627 	 * Should measure whether using a lower cdclk w/o IPS
7628 	 */
7629 	if (IS_BROADWELL(dev_priv) &&
7630 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7631 		return false;
7632 
7633 	return true;
7634 }
7635 
/*
 * Decide whether IPS (Intermediate Pixel Storage) gets enabled for this
 * crtc state. Starts from "off" and only turns it on when every
 * constraint is satisfied. Returns 0 on success or a negative error
 * (e.g. from grabbing the cdclk state) to be propagated.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	/* Default to disabled; flipped on only if all checks pass. */
	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
7677 
7678 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7679 {
7680 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7681 
7682 	/* GDG double wide on either pipe, otherwise pipe A only */
7683 	return INTEL_GEN(dev_priv) < 4 &&
7684 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7685 }
7686 
7687 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
7688 {
7689 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
7690 	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
7691 
7692 	/*
7693 	 * We only use IF-ID interlacing. If we ever use
7694 	 * PF-ID we'll need to adjust the pixel_rate here.
7695 	 */
7696 
7697 	if (!crtc_state->pch_pfit.enabled)
7698 		return pixel_rate;
7699 
7700 	pipe_w = crtc_state->pipe_src_w;
7701 	pipe_h = crtc_state->pipe_src_h;
7702 
7703 	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
7704 	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
7705 
7706 	if (pipe_w < pfit_w)
7707 		pipe_w = pfit_w;
7708 	if (pipe_h < pfit_h)
7709 		pipe_h = pfit_h;
7710 
7711 	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
7712 			!pfit_w || !pfit_h))
7713 		return pixel_rate;
7714 
7715 	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7716 		       pfit_w * pfit_h);
7717 }
7718 
7719 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
7720 					 const struct drm_display_mode *timings)
7721 {
7722 	mode->hdisplay = timings->crtc_hdisplay;
7723 	mode->htotal = timings->crtc_htotal;
7724 	mode->hsync_start = timings->crtc_hsync_start;
7725 	mode->hsync_end = timings->crtc_hsync_end;
7726 
7727 	mode->vdisplay = timings->crtc_vdisplay;
7728 	mode->vtotal = timings->crtc_vtotal;
7729 	mode->vsync_start = timings->crtc_vsync_start;
7730 	mode->vsync_end = timings->crtc_vsync_end;
7731 
7732 	mode->flags = timings->flags;
7733 	mode->type = DRM_MODE_TYPE_DRIVER;
7734 
7735 	mode->clock = timings->crtc_clock;
7736 
7737 	drm_mode_set_name(mode);
7738 }
7739 
7740 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7741 {
7742 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7743 
7744 	if (HAS_GMCH(dev_priv))
7745 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
7746 		crtc_state->pixel_rate =
7747 			crtc_state->hw.pipe_mode.crtc_clock;
7748 	else
7749 		crtc_state->pixel_rate =
7750 			ilk_pipe_pixel_rate(crtc_state);
7751 }
7752 
/*
 * Derive hw.pipe_mode, hw.mode and pixel_rate from the adjusted_mode that
 * was just read out from the hardware (hardware readout path counterpart
 * of intel_crtc_compute_config()).
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	/* Populate the user-visible fields from the crtc_* timings. */
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
	intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);

	/* Needs pipe_mode, so must come after the halving above. */
	intel_crtc_compute_pixel_rate(crtc_state);

	/* hw.mode advertises the full (doubled, for bigjoiner) source size. */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
7784 
/* Read out the encoder's hw state, then derive the dependent crtc state
 * (pipe_mode, mode, pixel_rate) from it — order matters. */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
7792 
/*
 * Validate and finish computing the crtc configuration: derive pipe_mode
 * from adjusted_mode (halved for bigjoiner), enforce dotclock and
 * platform constraints, and compute the pixel rate / FDI config.
 * Returns 0, -EINVAL on an unsupportable config, or the result of
 * ilk_fdi_compute_config() (which may be -EDEADLK or RETRY).
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (INTEL_GEN(dev_priv) < 4) {
		/* Single wide pipes are limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7886 
7887 static void
7888 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7889 {
7890 	while (*num > DATA_LINK_M_N_MASK ||
7891 	       *den > DATA_LINK_M_N_MASK) {
7892 		*num >>= 1;
7893 		*den >>= 1;
7894 	}
7895 }
7896 
7897 static void compute_m_n(unsigned int m, unsigned int n,
7898 			u32 *ret_m, u32 *ret_n,
7899 			bool constant_n)
7900 {
7901 	/*
7902 	 * Several DP dongles in particular seem to be fussy about
7903 	 * too large link M/N values. Give N value as 0x8000 that
7904 	 * should be acceptable by specific devices. 0x8000 is the
7905 	 * specified fixed N value for asynchronous clock mode,
7906 	 * which the devices expect also in synchronous clock mode.
7907 	 */
7908 	if (constant_n)
7909 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
7910 	else
7911 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7912 
7913 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7914 	intel_reduce_m_n_ratio(ret_m, ret_n);
7915 }
7916 
/*
 * Compute the data (gmch) and link M/N values for a DP/FDI link.
 * The data M/N is the ratio of the stream bandwidth (bpp * pixel clock,
 * optionally scaled up for FEC overhead) to the total link bandwidth
 * (link clock * lanes * 8 bits); the link M/N is pixel clock vs link
 * clock. TU size is fixed at 64.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	/* FEC adds link overhead, inflating the effective data rate. */
	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
7938 
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed into the hardware, preferring the BIOS state to avoid
 * flicker on takeover.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
7961 
7962 static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7963 {
7964 	if (dev_priv->params.panel_use_ssc >= 0)
7965 		return dev_priv->params.panel_use_ssc != 0;
7966 	return dev_priv->vbt.lvds_use_ssc
7967 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7968 }
7969 
/* Pack the Pineview FP register value: N is encoded as (1 << n). */
static u32 pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}
7974 
/* Pack the i9xx FP register value: N, M1 and M2 divider fields. */
static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
7979 
7980 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7981 				     struct intel_crtc_state *crtc_state,
7982 				     struct dpll *reduced_clock)
7983 {
7984 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7985 	u32 fp, fp2 = 0;
7986 
7987 	if (IS_PINEVIEW(dev_priv)) {
7988 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7989 		if (reduced_clock)
7990 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7991 	} else {
7992 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7993 		if (reduced_clock)
7994 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7995 	}
7996 
7997 	crtc_state->dpll_hw_state.fp0 = fp;
7998 
7999 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8000 	    reduced_clock) {
8001 		crtc_state->dpll_hw_state.fp1 = fp2;
8002 	} else {
8003 		crtc_state->dpll_hw_state.fp1 = fp;
8004 	}
8005 }
8006 
/*
 * Recalibrate the PLL B opamp via DPIO. The write sequence below is
 * order-sensitive; caller must hold the dpio lock (vlv_dpio_get()).
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the override again once the calibration value is in. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8035 
/* Program the PCH transcoder data/link M1/N1 registers for this crtc. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the data M register's upper bits. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
8049 
/* Does this transcoder have a second (M2/N2) register set, used for DRRS? */
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
				 enum transcoder transcoder)
{
	/* On HSW only the EDP transcoder carries M2/N2. */
	if (IS_HASWELL(dev_priv))
		return transcoder == TRANSCODER_EDP;

	/*
	 * Strictly speaking some registers are available before
	 * gen7, but we only support DRRS on gen7+
	 */
	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
}
8062 
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are indexed by transcoder; before that (G4X) by pipe.
 * @m2_n2 may be NULL when there is no second set to program.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* Pre-gen5: per-pipe G4X register layout. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
8104 
/*
 * Program the DP M/N values selected by @m_n (M1_N1 for the primary set,
 * M2_N2 for the DRRS downclock set) into the appropriate transcoder.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n directly,
	 * ignoring the M2_N2 selection above — presumably DRRS never
	 * applies to PCH encoders; confirm before relying on M2_N2 here.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
8130 
/* Compute the VLV DPLL control/MD register values into dpll_hw_state. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is only needed on the pipes other than A. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
8147 
/* Compute the CHV DPLL control/MD register values into dpll_hw_state. */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is only needed on the pipes other than A. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
8163 
/*
 * Program the VLV DPIO PHY registers (dividers, LPF coefficients, clock
 * source selection) in preparation for enabling the DPLL. The write
 * order follows the eDP/HDMI DPIO vbios notes and must not be changed.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Calibration is enabled with a second write of the same dividers. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8262 
/*
 * Program the CHV DPIO PHY registers (dividers, lock detect, loop
 * filter) in preparation for enabling the DPLL. The write sequence is
 * order-sensitive; do not reorder.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 holds a 22-bit fractional part below the integer bits. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients depend on the VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8367 
8368 /**
8369  * vlv_force_pll_on - forcibly enable just the PLL
8370  * @dev_priv: i915 private structure
8371  * @pipe: pipe PLL to enable
8372  * @dpll: PLL configuration
8373  *
8374  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8375  * in cases where we need the PLL enabled even when @pipe is not going to
8376  * be enabled.
8377  */
8378 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8379 		     const struct dpll *dpll)
8380 {
8381 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8382 	struct intel_crtc_state *pipe_config;
8383 
8384 	pipe_config = intel_crtc_state_alloc(crtc);
8385 	if (!pipe_config)
8386 		return -ENOMEM;
8387 
8388 	pipe_config->cpu_transcoder = (enum transcoder)pipe;
8389 	pipe_config->pixel_multiplier = 1;
8390 	pipe_config->dpll = *dpll;
8391 
8392 	if (IS_CHERRYVIEW(dev_priv)) {
8393 		chv_compute_dpll(crtc, pipe_config);
8394 		chv_prepare_pll(crtc, pipe_config);
8395 		chv_enable_pll(crtc, pipe_config);
8396 	} else {
8397 		vlv_compute_dpll(crtc, pipe_config);
8398 		vlv_prepare_pll(crtc, pipe_config);
8399 		vlv_enable_pll(crtc, pipe_config);
8400 	}
8401 
8402 	kfree(pipe_config);
8403 
8404 	return 0;
8405 }
8406 
8407 /**
8408  * vlv_force_pll_off - forcibly disable just the PLL
8409  * @dev_priv: i915 private structure
8410  * @pipe: pipe PLL to disable
8411  *
8412  * Disable the PLL for @pipe. To be used in cases where we need
8413  * the PLL enabled even when @pipe is not going to be enabled.
8414  */
8415 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8416 {
8417 	if (IS_CHERRYVIEW(dev_priv))
8418 		chv_disable_pll(dev_priv, pipe);
8419 	else
8420 		vlv_disable_pll(dev_priv, pipe);
8421 }
8422 
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for
 * i9xx-style platforms and store them in crtc_state->dpll_hw_state.
 * @reduced_clock, when non-NULL, supplies dividers for the reduced
 * (downclocked) state; only its p1 is consumed here (G4X only).
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Program the FP0/FP1 divider registers before assembling DPLL */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in the DPLL itself */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the high speed clock enable bit */
	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* Encode the post divider p2 */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC for LVDS, or DREFCLK */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ carries the UDI pixel multiplier in DPLL_MD instead */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8495 
/*
 * Compute the DPLL register value for gen2 (i8xx) platforms and store
 * it in crtc_state->dpll_hw_state.dpll. @reduced_clock is only passed
 * through to the FP divider programming helper.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Program the FP0/FP1 divider registers before assembling DPLL */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* p1/p2 are encoded differently for LVDS vs. the other outputs */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* Reference clock selection: SSC for LVDS panels, else DREFCLK */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8545 
/*
 * Program the transcoder H/V timing registers (HTOTAL/HBLANK/HSYNC,
 * VTOTAL/VBLANK/VSYNC, VSYNCSHIFT) from the adjusted mode. The hardware
 * registers store each value minus one, packed as (start-1) | (end-1)<<16.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a different vsync shift than the other outputs */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
		               vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8603 
8604 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8605 {
8606 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8607 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8608 	enum pipe pipe = crtc->pipe;
8609 
8610 	/* pipesrc controls the size that is scaled from, which should
8611 	 * always be the user's requested size.
8612 	 */
8613 	intel_de_write(dev_priv, PIPESRC(pipe),
8614 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8615 }
8616 
8617 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8618 {
8619 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8620 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8621 
8622 	if (IS_GEN(dev_priv, 2))
8623 		return false;
8624 
8625 	if (INTEL_GEN(dev_priv) >= 9 ||
8626 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8627 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8628 	else
8629 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8630 }
8631 
/*
 * Read back the transcoder H/V timing registers into
 * pipe_config->hw.adjusted_mode. The hardware stores each value minus
 * one with start in bits 15:0 and end/total in bits 31:16, hence the
 * unpacking and the "+ 1" on every field.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* DSI transcoders have no blank registers to read */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	/* DSI transcoders have no blank registers to read */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the halfline adjustment made on the write side */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
8676 
8677 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8678 				    struct intel_crtc_state *pipe_config)
8679 {
8680 	struct drm_device *dev = crtc->base.dev;
8681 	struct drm_i915_private *dev_priv = to_i915(dev);
8682 	u32 tmp;
8683 
8684 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8685 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8686 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8687 }
8688 
/*
 * Assemble and write the PIPECONF register for GMCH-style platforms:
 * bpc/dither (g4x+), interlace mode, color range (VLV/CHV), gamma mode
 * and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* pre-gen4 and SDVO use the field-indication interlace mode */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* Limited color range selection only exists on VLV/CHV */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
8749 
8750 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8751 				   struct intel_crtc_state *crtc_state)
8752 {
8753 	struct drm_device *dev = crtc->base.dev;
8754 	struct drm_i915_private *dev_priv = to_i915(dev);
8755 	const struct intel_limit *limit;
8756 	int refclk = 48000;
8757 
8758 	memset(&crtc_state->dpll_hw_state, 0,
8759 	       sizeof(crtc_state->dpll_hw_state));
8760 
8761 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8762 		if (intel_panel_use_ssc(dev_priv)) {
8763 			refclk = dev_priv->vbt.lvds_ssc_freq;
8764 			drm_dbg_kms(&dev_priv->drm,
8765 				    "using SSC reference clock of %d kHz\n",
8766 				    refclk);
8767 		}
8768 
8769 		limit = &intel_limits_i8xx_lvds;
8770 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8771 		limit = &intel_limits_i8xx_dvo;
8772 	} else {
8773 		limit = &intel_limits_i8xx_dac;
8774 	}
8775 
8776 	if (!crtc_state->clock_set &&
8777 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8778 				 refclk, NULL, &crtc_state->dpll)) {
8779 		drm_err(&dev_priv->drm,
8780 			"Couldn't find PLL settings for mode!\n");
8781 		return -EINVAL;
8782 	}
8783 
8784 	i8xx_compute_dpll(crtc, crtc_state, NULL);
8785 
8786 	return 0;
8787 }
8788 
8789 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8790 				  struct intel_crtc_state *crtc_state)
8791 {
8792 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8793 	const struct intel_limit *limit;
8794 	int refclk = 96000;
8795 
8796 	memset(&crtc_state->dpll_hw_state, 0,
8797 	       sizeof(crtc_state->dpll_hw_state));
8798 
8799 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8800 		if (intel_panel_use_ssc(dev_priv)) {
8801 			refclk = dev_priv->vbt.lvds_ssc_freq;
8802 			drm_dbg_kms(&dev_priv->drm,
8803 				    "using SSC reference clock of %d kHz\n",
8804 				    refclk);
8805 		}
8806 
8807 		if (intel_is_dual_link_lvds(dev_priv))
8808 			limit = &intel_limits_g4x_dual_channel_lvds;
8809 		else
8810 			limit = &intel_limits_g4x_single_channel_lvds;
8811 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8812 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8813 		limit = &intel_limits_g4x_hdmi;
8814 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8815 		limit = &intel_limits_g4x_sdvo;
8816 	} else {
8817 		/* The option is for other outputs */
8818 		limit = &intel_limits_i9xx_sdvo;
8819 	}
8820 
8821 	if (!crtc_state->clock_set &&
8822 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8823 				refclk, NULL, &crtc_state->dpll)) {
8824 		drm_err(&dev_priv->drm,
8825 			"Couldn't find PLL settings for mode!\n");
8826 		return -EINVAL;
8827 	}
8828 
8829 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8830 
8831 	return 0;
8832 }
8833 
8834 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8835 				  struct intel_crtc_state *crtc_state)
8836 {
8837 	struct drm_device *dev = crtc->base.dev;
8838 	struct drm_i915_private *dev_priv = to_i915(dev);
8839 	const struct intel_limit *limit;
8840 	int refclk = 96000;
8841 
8842 	memset(&crtc_state->dpll_hw_state, 0,
8843 	       sizeof(crtc_state->dpll_hw_state));
8844 
8845 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8846 		if (intel_panel_use_ssc(dev_priv)) {
8847 			refclk = dev_priv->vbt.lvds_ssc_freq;
8848 			drm_dbg_kms(&dev_priv->drm,
8849 				    "using SSC reference clock of %d kHz\n",
8850 				    refclk);
8851 		}
8852 
8853 		limit = &pnv_limits_lvds;
8854 	} else {
8855 		limit = &pnv_limits_sdvo;
8856 	}
8857 
8858 	if (!crtc_state->clock_set &&
8859 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8860 				refclk, NULL, &crtc_state->dpll)) {
8861 		drm_err(&dev_priv->drm,
8862 			"Couldn't find PLL settings for mode!\n");
8863 		return -EINVAL;
8864 	}
8865 
8866 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8867 
8868 	return 0;
8869 }
8870 
8871 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8872 				   struct intel_crtc_state *crtc_state)
8873 {
8874 	struct drm_device *dev = crtc->base.dev;
8875 	struct drm_i915_private *dev_priv = to_i915(dev);
8876 	const struct intel_limit *limit;
8877 	int refclk = 96000;
8878 
8879 	memset(&crtc_state->dpll_hw_state, 0,
8880 	       sizeof(crtc_state->dpll_hw_state));
8881 
8882 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8883 		if (intel_panel_use_ssc(dev_priv)) {
8884 			refclk = dev_priv->vbt.lvds_ssc_freq;
8885 			drm_dbg_kms(&dev_priv->drm,
8886 				    "using SSC reference clock of %d kHz\n",
8887 				    refclk);
8888 		}
8889 
8890 		limit = &intel_limits_i9xx_lvds;
8891 	} else {
8892 		limit = &intel_limits_i9xx_sdvo;
8893 	}
8894 
8895 	if (!crtc_state->clock_set &&
8896 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8897 				 refclk, NULL, &crtc_state->dpll)) {
8898 		drm_err(&dev_priv->drm,
8899 			"Couldn't find PLL settings for mode!\n");
8900 		return -EINVAL;
8901 	}
8902 
8903 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8904 
8905 	return 0;
8906 }
8907 
8908 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8909 				  struct intel_crtc_state *crtc_state)
8910 {
8911 	int refclk = 100000;
8912 	const struct intel_limit *limit = &intel_limits_chv;
8913 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8914 
8915 	memset(&crtc_state->dpll_hw_state, 0,
8916 	       sizeof(crtc_state->dpll_hw_state));
8917 
8918 	if (!crtc_state->clock_set &&
8919 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8920 				refclk, NULL, &crtc_state->dpll)) {
8921 		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
8922 		return -EINVAL;
8923 	}
8924 
8925 	chv_compute_dpll(crtc, crtc_state);
8926 
8927 	return 0;
8928 }
8929 
8930 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8931 				  struct intel_crtc_state *crtc_state)
8932 {
8933 	int refclk = 100000;
8934 	const struct intel_limit *limit = &intel_limits_vlv;
8935 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8936 
8937 	memset(&crtc_state->dpll_hw_state, 0,
8938 	       sizeof(crtc_state->dpll_hw_state));
8939 
8940 	if (!crtc_state->clock_set &&
8941 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8942 				refclk, NULL, &crtc_state->dpll)) {
8943 		drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
8944 		return -EINVAL;
8945 	}
8946 
8947 	vlv_compute_dpll(crtc, crtc_state);
8948 
8949 	return 0;
8950 }
8951 
8952 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8953 {
8954 	if (IS_I830(dev_priv))
8955 		return false;
8956 
8957 	return INTEL_GEN(dev_priv) >= 4 ||
8958 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8959 }
8960 
8961 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
8962 {
8963 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8964 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8965 	u32 tmp;
8966 
8967 	if (!i9xx_has_pfit(dev_priv))
8968 		return;
8969 
8970 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
8971 	if (!(tmp & PFIT_ENABLE))
8972 		return;
8973 
8974 	/* Check whether the pfit is attached to our pipe. */
8975 	if (INTEL_GEN(dev_priv) < 4) {
8976 		if (crtc->pipe != PIPE_B)
8977 			return;
8978 	} else {
8979 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8980 			return;
8981 	}
8982 
8983 	crtc_state->gmch_pfit.control = tmp;
8984 	crtc_state->gmch_pfit.pgm_ratios =
8985 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
8986 }
8987 
/*
 * Read the VLV DPLL divider fields from DPIO and derive the port clock
 * for the readout path. Nothing is read when the DPLL is disabled.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the individual divider fields from PLL_DW3 */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
9014 
/*
 * Read out the firmware/BIOS-programmed primary plane state and build a
 * framebuffer description from it (tiling, rotation, format, base,
 * dimensions, stride). On failure to allocate, or if the plane is
 * disabled, @plane_config is left without an fb.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to read out if the plane is off */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	/* Tiling and 180° rotation bits only exist on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B primary plane additionally supports horizontal mirroring */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset registers differ per platform generation */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = intel_de_read(dev_priv,
					       DSPTILEOFF(i9xx_plane));
		else
			offset = intel_de_read(dev_priv,
					       DSPLINOFF(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC packs (width-1)<<16 | (height-1) */
	val = intel_de_read(dev_priv, PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
}
9100 
/*
 * Read the CHV DPLL divider fields from DPIO and derive the port clock
 * for the readout path. Nothing is read when the DPLL is disabled.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	/* m2 integer part lives in DW0; the DW2 fractional part is only
	 * valid when the fractional divider is enabled in DW3 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9134 
9135 static enum intel_output_format
9136 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9137 {
9138 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9139 	u32 tmp;
9140 
9141 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9142 
9143 	if (tmp & PIPEMISC_YUV420_ENABLE) {
9144 		/* We support 4:2:0 in full blend mode only */
9145 		drm_WARN_ON(&dev_priv->drm,
9146 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9147 
9148 		return INTEL_OUTPUT_FORMAT_YCBCR420;
9149 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9150 		return INTEL_OUTPUT_FORMAT_YCBCR444;
9151 	} else {
9152 		return INTEL_OUTPUT_FORMAT_RGB;
9153 	}
9154 }
9155 
9156 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9157 {
9158 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9159 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9160 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9161 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9162 	u32 tmp;
9163 
9164 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9165 
9166 	if (tmp & DISPPLANE_GAMMA_ENABLE)
9167 		crtc_state->gamma_enable = true;
9168 
9169 	if (!HAS_GMCH(dev_priv) &&
9170 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
9171 		crtc_state->csc_enable = true;
9172 }
9173 
/*
 * Read out the full hardware state of @crtc's pipe into @pipe_config:
 * bpc, color range, gamma/CSC, timings, source size, pfit, DPLL state
 * and derived clocks. Returns true if the pipe is enabled and the state
 * was read, false otherwise (pipe off or power domain unavailable).
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Registers are unreadable without the pipe's power domain */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/VLV/CHV store the bpc in PIPECONF */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Recover the pixel multiplier; its location varies per platform */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Derive port_clock from the platform-specific DPLL readout */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
9295 
/*
 * ilk_init_pch_refclk - bring the PCH display reference clock (DREF) into
 * the state required by the currently registered outputs.
 *
 * The desired PCH_DREF_CONTROL value is computed first (based on whether an
 * LVDS/eDP panel is present, whether a CK505 external clock chip is in use,
 * and whether any enabled shared DPLL already consumes the SSC source), and
 * the hardware is then walked towards that value one clock source at a
 * time, with a settle delay after each write.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * On IBX the VBT tells us whether an external CK505 clock
		 * chip provides the reference; SSC is only usable when the
		 * CK505 is present. Non-IBX PCHs can always use SSC.
		 */
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but a DPLL still relies on SSC: keep it alive. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise updates above must converge on the precomputed target. */
	BUG_ON(val != final);
}
9464 
9465 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9466 {
9467 	u32 tmp;
9468 
9469 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
9470 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9471 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
9472 
9473 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
9474 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9475 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
9476 
9477 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
9478 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9479 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
9480 
9481 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
9482 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9483 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
9484 }
9485 
9486 /* WaMPhyProgramming:hsw */
9487 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9488 {
9489 	u32 tmp;
9490 
9491 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9492 	tmp &= ~(0xFF << 24);
9493 	tmp |= (0x12 << 24);
9494 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9495 
9496 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9497 	tmp |= (1 << 11);
9498 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9499 
9500 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9501 	tmp |= (1 << 11);
9502 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9503 
9504 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9505 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9506 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9507 
9508 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9509 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9510 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9511 
9512 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9513 	tmp &= ~(7 << 13);
9514 	tmp |= (5 << 13);
9515 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9516 
9517 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9518 	tmp &= ~(7 << 13);
9519 	tmp |= (5 << 13);
9520 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9521 
9522 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9523 	tmp &= ~0xFF;
9524 	tmp |= 0x1C;
9525 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9526 
9527 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9528 	tmp &= ~0xFF;
9529 	tmp |= 0x1C;
9530 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9531 
9532 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9533 	tmp &= ~(0xFF << 16);
9534 	tmp |= (0x1C << 16);
9535 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
9536 
9537 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
9538 	tmp &= ~(0xFF << 16);
9539 	tmp |= (0x1C << 16);
9540 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
9541 
9542 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
9543 	tmp |= (1 << 27);
9544 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
9545 
9546 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
9547 	tmp |= (1 << 27);
9548 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
9549 
9550 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
9551 	tmp &= ~(0xF << 28);
9552 	tmp |= (4 << 28);
9553 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
9554 
9555 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
9556 	tmp &= ~(0xF << 28);
9557 	tmp |= (4 << 28);
9558 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
9559 }
9560 
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* FDI always needs downspread, and LPT-LP has no FDI at all; fix up
	 * inconsistent arguments rather than programming an invalid combo. */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep the path-alt bit set for now. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear path-alt to route the spread clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Buffer-enable bit lives in SBI_GEN0 on LPT-LP, SBI_DBUFF0 otherwise;
	 * the same bit is cleared again in lpt_disable_clkout_dp(). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9606 
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Mirror of the buffer-enable programming in lpt_enable_clkout_dp(). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Put the path into alt mode (with a settle delay) before
		 * disabling the SSC block. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9632 
/* Map a bend amount in [-50, 50] (units of 5) to a table index in [0, 20]. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE low-word values for each supported CLKOUT_DP bend,
 * indexed by BEND_IDX(steps). Adjacent half-steps (e.g. 45 and 50) share
 * the same divider value; lpt_bend_clkout_dp() additionally programs a
 * dither phase for the odd multiples of 5.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9658 
9659 /*
9660  * Bend CLKOUT_DP
9661  * steps -50 to 50 inclusive, in steps of 5
9662  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9663  * change in clock period = -(steps / 10) * 5.787 ps
9664  */
9665 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9666 {
9667 	u32 tmp;
9668 	int idx = BEND_IDX(steps);
9669 
9670 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
9671 		return;
9672 
9673 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
9674 		return;
9675 
9676 	mutex_lock(&dev_priv->sb_lock);
9677 
9678 	if (steps % 10 != 0)
9679 		tmp = 0xAAAAAAAB;
9680 	else
9681 		tmp = 0x00000000;
9682 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9683 
9684 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9685 	tmp &= 0xffff0000;
9686 	tmp |= sscdivintphase[idx];
9687 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9688 
9689 	mutex_unlock(&dev_priv->sb_lock);
9690 }
9691 
9692 #undef BEND_IDX
9693 
9694 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9695 {
9696 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9697 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
9698 
9699 	if ((ctl & SPLL_PLL_ENABLE) == 0)
9700 		return false;
9701 
9702 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9703 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9704 		return true;
9705 
9706 	if (IS_BROADWELL(dev_priv) &&
9707 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9708 		return true;
9709 
9710 	return false;
9711 }
9712 
9713 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9714 			       enum intel_dpll_id id)
9715 {
9716 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9717 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
9718 
9719 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
9720 		return false;
9721 
9722 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9723 		return true;
9724 
9725 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9726 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9727 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9728 		return true;
9729 
9730 	return false;
9731 }
9732 
9733 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9734 {
9735 	struct intel_encoder *encoder;
9736 	bool has_fdi = false;
9737 
9738 	for_each_intel_encoder(&dev_priv->drm, encoder) {
9739 		switch (encoder->type) {
9740 		case INTEL_OUTPUT_ANALOG:
9741 			has_fdi = true;
9742 			break;
9743 		default:
9744 			break;
9745 		}
9746 	}
9747 
9748 	/*
9749 	 * The BIOS may have decided to use the PCH SSC
9750 	 * reference so we must not disable it until the
9751 	 * relevant PLLs have stopped relying on it. We'll
9752 	 * just leave the PCH SSC reference enabled in case
9753 	 * any active PLL is using it. It will get disabled
9754 	 * after runtime suspend if we don't have FDI.
9755 	 *
9756 	 * TODO: Move the whole reference clock handling
9757 	 * to the modeset sequence proper so that we can
9758 	 * actually enable/disable/reconfigure these things
9759 	 * safely. To do that we need to introduce a real
9760 	 * clock hierarchy. That would also allow us to do
9761 	 * clock bending finally.
9762 	 */
9763 	dev_priv->pch_ssc_use = 0;
9764 
9765 	if (spll_uses_pch_ssc(dev_priv)) {
9766 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
9767 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9768 	}
9769 
9770 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9771 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
9772 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9773 	}
9774 
9775 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9776 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
9777 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9778 	}
9779 
9780 	if (dev_priv->pch_ssc_use)
9781 		return;
9782 
9783 	if (has_fdi) {
9784 		lpt_bend_clkout_dp(dev_priv, 0);
9785 		lpt_enable_clkout_dp(dev_priv, true, true);
9786 	} else {
9787 		lpt_disable_clkout_dp(dev_priv);
9788 	}
9789 }
9790 
9791 /*
9792  * Initialize reference clocks when the driver loads
9793  */
9794 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9795 {
9796 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9797 		ilk_init_pch_refclk(dev_priv);
9798 	else if (HAS_PCH_LPT(dev_priv))
9799 		lpt_init_pch_refclk(dev_priv);
9800 }
9801 
9802 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
9803 {
9804 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9805 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9806 	enum pipe pipe = crtc->pipe;
9807 	u32 val;
9808 
9809 	val = 0;
9810 
9811 	switch (crtc_state->pipe_bpp) {
9812 	case 18:
9813 		val |= PIPECONF_6BPC;
9814 		break;
9815 	case 24:
9816 		val |= PIPECONF_8BPC;
9817 		break;
9818 	case 30:
9819 		val |= PIPECONF_10BPC;
9820 		break;
9821 	case 36:
9822 		val |= PIPECONF_12BPC;
9823 		break;
9824 	default:
9825 		/* Case prevented by intel_choose_pipe_bpp_dither. */
9826 		BUG();
9827 	}
9828 
9829 	if (crtc_state->dither)
9830 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9831 
9832 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9833 		val |= PIPECONF_INTERLACED_ILK;
9834 	else
9835 		val |= PIPECONF_PROGRESSIVE;
9836 
9837 	/*
9838 	 * This would end up with an odd purple hue over
9839 	 * the entire display. Make sure we don't do it.
9840 	 */
9841 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
9842 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9843 
9844 	if (crtc_state->limited_color_range &&
9845 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
9846 		val |= PIPECONF_COLOR_RANGE_SELECT;
9847 
9848 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9849 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9850 
9851 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9852 
9853 	val |= PIPECONF_FRAME_START_DELAY(0);
9854 
9855 	intel_de_write(dev_priv, PIPECONF(pipe), val);
9856 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
9857 }
9858 
9859 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
9860 {
9861 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9862 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9863 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9864 	u32 val = 0;
9865 
9866 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
9867 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9868 
9869 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9870 		val |= PIPECONF_INTERLACED_ILK;
9871 	else
9872 		val |= PIPECONF_PROGRESSIVE;
9873 
9874 	if (IS_HASWELL(dev_priv) &&
9875 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9876 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9877 
9878 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
9879 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
9880 }
9881 
9882 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9883 {
9884 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9885 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9886 	u32 val = 0;
9887 
9888 	switch (crtc_state->pipe_bpp) {
9889 	case 18:
9890 		val |= PIPEMISC_DITHER_6_BPC;
9891 		break;
9892 	case 24:
9893 		val |= PIPEMISC_DITHER_8_BPC;
9894 		break;
9895 	case 30:
9896 		val |= PIPEMISC_DITHER_10_BPC;
9897 		break;
9898 	case 36:
9899 		val |= PIPEMISC_DITHER_12_BPC;
9900 		break;
9901 	default:
9902 		MISSING_CASE(crtc_state->pipe_bpp);
9903 		break;
9904 	}
9905 
9906 	if (crtc_state->dither)
9907 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9908 
9909 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9910 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9911 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9912 
9913 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9914 		val |= PIPEMISC_YUV420_ENABLE |
9915 			PIPEMISC_YUV420_MODE_FULL_BLEND;
9916 
9917 	if (INTEL_GEN(dev_priv) >= 11 &&
9918 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9919 					   BIT(PLANE_CURSOR))) == 0)
9920 		val |= PIPEMISC_HDR_MODE_PRECISION;
9921 
9922 	if (INTEL_GEN(dev_priv) >= 12)
9923 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
9924 
9925 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
9926 }
9927 
9928 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9929 {
9930 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9931 	u32 tmp;
9932 
9933 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9934 
9935 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9936 	case PIPEMISC_DITHER_6_BPC:
9937 		return 18;
9938 	case PIPEMISC_DITHER_8_BPC:
9939 		return 24;
9940 	case PIPEMISC_DITHER_10_BPC:
9941 		return 30;
9942 	case PIPEMISC_DITHER_12_BPC:
9943 		return 36;
9944 	default:
9945 		MISSING_CASE(tmp);
9946 		return 0;
9947 	}
9948 }
9949 
/*
 * Minimum FDI lane count needed to carry @target_clock (kHz) at @bpp bits
 * per pixel over lanes of @link_bw (kHz symbol rate, 8 bits per symbol).
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int link_bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bps = link_bw * 8;

	/* Round up: a partially used lane is still a whole lane. */
	return (link_bps + lane_bps - 1) / lane_bps;
}
9960 
9961 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
9962 {
9963 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9964 }
9965 
/*
 * Compute the DPLL, FP0 and FP1 register values for an ILK-style PCH PLL
 * and store them in crtc_state->dpll_hw_state. @reduced_clock, when
 * non-NULL, supplies an alternate divisor set used for FP1 (otherwise FP1
 * mirrors FP0).
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same M < factor * N tuning criterion as for the primary fp. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels using SSC take their reference from the SSC input. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10067 
10068 static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
10069 				  struct intel_crtc_state *crtc_state)
10070 {
10071 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10072 	struct intel_atomic_state *state =
10073 		to_intel_atomic_state(crtc_state->uapi.state);
10074 	const struct intel_limit *limit;
10075 	int refclk = 120000;
10076 
10077 	memset(&crtc_state->dpll_hw_state, 0,
10078 	       sizeof(crtc_state->dpll_hw_state));
10079 
10080 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
10081 	if (!crtc_state->has_pch_encoder)
10082 		return 0;
10083 
10084 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
10085 		if (intel_panel_use_ssc(dev_priv)) {
10086 			drm_dbg_kms(&dev_priv->drm,
10087 				    "using SSC reference clock of %d kHz\n",
10088 				    dev_priv->vbt.lvds_ssc_freq);
10089 			refclk = dev_priv->vbt.lvds_ssc_freq;
10090 		}
10091 
10092 		if (intel_is_dual_link_lvds(dev_priv)) {
10093 			if (refclk == 100000)
10094 				limit = &ilk_limits_dual_lvds_100m;
10095 			else
10096 				limit = &ilk_limits_dual_lvds;
10097 		} else {
10098 			if (refclk == 100000)
10099 				limit = &ilk_limits_single_lvds_100m;
10100 			else
10101 				limit = &ilk_limits_single_lvds;
10102 		}
10103 	} else {
10104 		limit = &ilk_limits_dac;
10105 	}
10106 
10107 	if (!crtc_state->clock_set &&
10108 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
10109 				refclk, NULL, &crtc_state->dpll)) {
10110 		drm_err(&dev_priv->drm,
10111 			"Couldn't find PLL settings for mode!\n");
10112 		return -EINVAL;
10113 	}
10114 
10115 	ilk_compute_dpll(crtc, crtc_state, NULL);
10116 
10117 	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
10118 		drm_dbg_kms(&dev_priv->drm,
10119 			    "failed to find PLL for pipe %c\n",
10120 			    pipe_name(crtc->pipe));
10121 		return -EINVAL;
10122 	}
10123 
10124 	return 0;
10125 }
10126 
/*
 * Read back the link M/N values from the PCH transcoder registers for
 * @crtc's pipe. The TU size field shares the DATA_M1 register with the
 * data M value and is stored as tu = field + 1.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	/* Strip the TU size bits out of the data M value. */
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
10142 
/*
 * Read back the link M/N values from the CPU transcoder registers.
 * @m2_n2 is optional and only filled in when the transcoder actually has
 * a second M2/N2 register set (see transcoder_has_m2_n2()). Pre-gen5 uses
 * the per-pipe G4X register layout instead. TU size is stored as the
 * hardware field + 1.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		/* Strip the TU size bits out of the data M value. */
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n =	intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m =	intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n =	intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
10187 
10188 void intel_dp_get_m_n(struct intel_crtc *crtc,
10189 		      struct intel_crtc_state *pipe_config)
10190 {
10191 	if (pipe_config->has_pch_encoder)
10192 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10193 	else
10194 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10195 					     &pipe_config->dp_m_n,
10196 					     &pipe_config->dp_m2_n2);
10197 }
10198 
/* Read back the FDI link M/N configuration from the CPU transcoder. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10205 
10206 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
10207 				  u32 pos, u32 size)
10208 {
10209 	drm_rect_init(&crtc_state->pch_pfit.dst,
10210 		      pos >> 16, pos & 0xffff,
10211 		      size >> 16, size & 0xffff);
10212 }
10213 
/*
 * Read out the panel fitter (pipe scaler) state on SKL+. Scans the pipe's
 * scalers for one that is enabled but not assigned to a plane — that one
 * is the pipe scaler — and records its window position/size, the scaler
 * id, and this crtc's bit in scaler_users.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* Skip scalers that are disabled or bound to a plane. */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* id < 0 means no pipe scaler is active: clear our user bit. */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
10248 
/*
 * Read out the hardware state of the primary plane as programmed by the
 * BIOS/firmware and reconstruct a framebuffer description from it, so the
 * boot framebuffer can be inherited. On any unsupported configuration or
 * failure, plane_config->fb is simply left unset.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	if (crtc_state->bigjoiner) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unsupported bigjoiner configuration for initial FB\n");
		return;
	}

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* The PLANE_CTL pixel format field layout differs on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/*
	 * On GLK and gen10+ the alpha mode lives in PLANE_COLOR_CTL;
	 * on older platforms it is part of PLANE_CTL itself.
	 */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hw tiling/compression bits into a fb modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why the 90/270 values
	 * are swapped here.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* 90/270 degree rotation would require extra work */
	if (drm_rotation_90_or_270(plane_config->rotation))
		goto error;

	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores size minus one in each 16-bit half. */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units that depend on format/modifier. */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10392 
/*
 * Read out the PCH panel fitter state on ILK-style platforms and record
 * the fitter's destination window in the crtc state.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
10418 
10419 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10420 				struct intel_crtc_state *pipe_config)
10421 {
10422 	struct drm_device *dev = crtc->base.dev;
10423 	struct drm_i915_private *dev_priv = to_i915(dev);
10424 	enum intel_display_power_domain power_domain;
10425 	intel_wakeref_t wakeref;
10426 	u32 tmp;
10427 	bool ret;
10428 
10429 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10430 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10431 	if (!wakeref)
10432 		return false;
10433 
10434 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10435 	pipe_config->shared_dpll = NULL;
10436 
10437 	ret = false;
10438 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10439 	if (!(tmp & PIPECONF_ENABLE))
10440 		goto out;
10441 
10442 	switch (tmp & PIPECONF_BPC_MASK) {
10443 	case PIPECONF_6BPC:
10444 		pipe_config->pipe_bpp = 18;
10445 		break;
10446 	case PIPECONF_8BPC:
10447 		pipe_config->pipe_bpp = 24;
10448 		break;
10449 	case PIPECONF_10BPC:
10450 		pipe_config->pipe_bpp = 30;
10451 		break;
10452 	case PIPECONF_12BPC:
10453 		pipe_config->pipe_bpp = 36;
10454 		break;
10455 	default:
10456 		break;
10457 	}
10458 
10459 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10460 		pipe_config->limited_color_range = true;
10461 
10462 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10463 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10464 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10465 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10466 		break;
10467 	default:
10468 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10469 		break;
10470 	}
10471 
10472 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10473 		PIPECONF_GAMMA_MODE_SHIFT;
10474 
10475 	pipe_config->csc_mode = intel_de_read(dev_priv,
10476 					      PIPE_CSC_MODE(crtc->pipe));
10477 
10478 	i9xx_get_pipe_color_config(pipe_config);
10479 	intel_color_get_config(pipe_config);
10480 
10481 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10482 		struct intel_shared_dpll *pll;
10483 		enum intel_dpll_id pll_id;
10484 		bool pll_active;
10485 
10486 		pipe_config->has_pch_encoder = true;
10487 
10488 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10489 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10490 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10491 
10492 		ilk_get_fdi_m_n_config(crtc, pipe_config);
10493 
10494 		if (HAS_PCH_IBX(dev_priv)) {
10495 			/*
10496 			 * The pipe->pch transcoder and pch transcoder->pll
10497 			 * mapping is fixed.
10498 			 */
10499 			pll_id = (enum intel_dpll_id) crtc->pipe;
10500 		} else {
10501 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10502 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10503 				pll_id = DPLL_ID_PCH_PLL_B;
10504 			else
10505 				pll_id= DPLL_ID_PCH_PLL_A;
10506 		}
10507 
10508 		pipe_config->shared_dpll =
10509 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
10510 		pll = pipe_config->shared_dpll;
10511 
10512 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
10513 						     &pipe_config->dpll_hw_state);
10514 		drm_WARN_ON(dev, !pll_active);
10515 
10516 		tmp = pipe_config->dpll_hw_state.dpll;
10517 		pipe_config->pixel_multiplier =
10518 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10519 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10520 
10521 		ilk_pch_clock_get(crtc, pipe_config);
10522 	} else {
10523 		pipe_config->pixel_multiplier = 1;
10524 	}
10525 
10526 	intel_get_transcoder_timings(crtc, pipe_config);
10527 	intel_get_pipe_src_size(crtc, pipe_config);
10528 
10529 	ilk_get_pfit_config(pipe_config);
10530 
10531 	ret = true;
10532 
10533 out:
10534 	intel_display_power_put(dev_priv, power_domain, wakeref);
10535 
10536 	return ret;
10537 }
10538 
/*
 * Reserve a shared DPLL for the crtc during atomic check on HSW+.
 * Returns 0 on success or -EINVAL if no suitable PLL could be found.
 */
static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	/*
	 * A PLL reservation is needed for all non-DSI outputs, and also
	 * for DSI on gen11+ (where DSI still goes through a shared PLL).
	 */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
	    INTEL_GEN(dev_priv) >= 11) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
			drm_dbg_kms(&dev_priv->drm,
				    "failed to find PLL for pipe %c\n",
				    pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}
10561 
10562 static void dg1_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10563 			    struct intel_crtc_state *pipe_config)
10564 {
10565 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10566 	enum phy phy = intel_port_to_phy(dev_priv, port);
10567 	struct icl_port_dpll *port_dpll;
10568 	struct intel_shared_dpll *pll;
10569 	enum intel_dpll_id id;
10570 	bool pll_active;
10571 	u32 clk_sel;
10572 
10573 	clk_sel = intel_de_read(dev_priv, DG1_DPCLKA_CFGCR0(phy)) & DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
10574 	id = DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_DPLL_MAP(clk_sel, phy);
10575 
10576 	if (WARN_ON(id > DPLL_ID_DG1_DPLL3))
10577 		return;
10578 
10579 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
10580 	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];
10581 
10582 	port_dpll->pll = pll;
10583 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
10584 					     &port_dpll->hw_state);
10585 	drm_WARN_ON(&dev_priv->drm, !pll_active);
10586 
10587 	icl_set_active_port_dpll(pipe_config, port_dpll_id);
10588 }
10589 
/*
 * Read out which shared DPLL drives @port on ICL+ and record the PLL
 * and its hardware state in @pipe_config. Combo PHYs use the DPCLKA
 * clock select register, Type-C PHYs use DDI_CLK_SEL (MG PLL or TBT PLL).
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	struct icl_port_dpll *port_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		u32 mask, shift;

		/* RKL has its own field layout in the same register. */
		if (IS_ROCKETLAKE(dev_priv)) {
			mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		} else {
			mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
			shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		}

		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
		id = temp >> shift;
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			/* Anything else must be one of the TBT PLL selects. */
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	port_dpll = &pipe_config->icl_port_dplls[port_dpll_id];

	port_dpll->pll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &port_dpll->hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10643 
/*
 * Read out which shared DPLL drives @port on CNL and record the PLL and
 * its hardware state in @pipe_config.
 */
static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	/* CNL only has DPLL0..DPLL2; anything else means garbage readout. */
	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}
10665 
10666 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10667 				enum port port,
10668 				struct intel_crtc_state *pipe_config)
10669 {
10670 	struct intel_shared_dpll *pll;
10671 	enum intel_dpll_id id;
10672 	bool pll_active;
10673 
10674 	switch (port) {
10675 	case PORT_A:
10676 		id = DPLL_ID_SKL_DPLL0;
10677 		break;
10678 	case PORT_B:
10679 		id = DPLL_ID_SKL_DPLL1;
10680 		break;
10681 	case PORT_C:
10682 		id = DPLL_ID_SKL_DPLL2;
10683 		break;
10684 	default:
10685 		drm_err(&dev_priv->drm, "Incorrect port type\n");
10686 		return;
10687 	}
10688 
10689 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
10690 
10691 	pipe_config->shared_dpll = pll;
10692 	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
10693 					     &pipe_config->dpll_hw_state);
10694 	drm_WARN_ON(&dev_priv->drm, !pll_active);
10695 }
10696 
/*
 * Read out which shared DPLL drives @port on SKL/KBL and record the PLL
 * and its hardware state in @pipe_config.
 */
static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	bool pll_active;
	u32 temp;

	temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/*
	 * NOTE(review): the shift is open-coded here; presumably it matches
	 * the DPLL_CTRL2 clock select field layout — verify against the
	 * register definition.
	 */
	id = temp >> (port * 3 + 1);

	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}
10718 
/*
 * Read out which shared PLL drives @port on HSW/BDW from PORT_CLK_SEL
 * and record the PLL and its hardware state in @pipe_config. Bails out
 * quietly if no clock is selected for the port.
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
	bool pll_active;

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		/* Unknown selects are logged, then treated like "none". */
		MISSING_CASE(ddi_pll_sel);
		fallthrough;
	case PORT_CLK_SEL_NONE:
		return;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	pipe_config->shared_dpll = pll;
	pll_active = intel_dpll_get_hw_state(dev_priv, pll,
					     &pipe_config->dpll_hw_state);
	drm_WARN_ON(&dev_priv->drm, !pll_active);
}
10760 
/*
 * Determine which CPU transcoder feeds @crtc and whether it is enabled.
 *
 * Starts from the fixed pipe->transcoder mapping and then checks the
 * eDP (and on gen11+ the DSI) panel transcoders, which can be routed to
 * any pipe. Takes the transcoder's power domain into
 * @power_domain_set (released by the caller). Returns true if the
 * transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe the panel transcoder is routed to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10848 
/*
 * Check whether one of the BXT DSI transcoders is driving @crtc.
 * On a match, records the DSI transcoder in @pipe_config and takes its
 * power domain into @power_domain_set. Returns true if a DSI transcoder
 * was found for this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Fixed port->transcoder mapping: A->DSI_A, C->DSI_C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10894 
/*
 * Read out the DDI port and PLL state for @crtc: determine which port
 * the transcoder drives, dispatch to the platform-specific PLL readout,
 * and on HSW/BDW also read out FDI/PCH state for DDI E.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders have a fixed port assignment. */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		/* The port select field layout changed on gen12+. */
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	if (IS_DG1(dev_priv))
		dg1_get_ddi_pll(dev_priv, port, pipe_config);
	else if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10946 
10947 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
10948 				struct intel_crtc_state *pipe_config)
10949 {
10950 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10951 	struct intel_display_power_domain_set power_domain_set = { };
10952 	bool active;
10953 	u32 tmp;
10954 
10955 	pipe_config->master_transcoder = INVALID_TRANSCODER;
10956 
10957 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
10958 						       POWER_DOMAIN_PIPE(crtc->pipe)))
10959 		return false;
10960 
10961 	pipe_config->shared_dpll = NULL;
10962 
10963 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
10964 
10965 	if (IS_GEN9_LP(dev_priv) &&
10966 	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
10967 		drm_WARN_ON(&dev_priv->drm, active);
10968 		active = true;
10969 	}
10970 
10971 	intel_dsc_get_config(pipe_config);
10972 
10973 	if (!active) {
10974 		/* bigjoiner slave doesn't enable transcoder */
10975 		if (!pipe_config->bigjoiner_slave)
10976 			goto out;
10977 
10978 		active = true;
10979 		pipe_config->pixel_multiplier = 1;
10980 
10981 		/* we cannot read out most state, so don't bother.. */
10982 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
10983 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10984 	    INTEL_GEN(dev_priv) >= 11) {
10985 		hsw_get_ddi_port_state(crtc, pipe_config);
10986 		intel_get_transcoder_timings(crtc, pipe_config);
10987 	}
10988 
10989 	intel_get_pipe_src_size(crtc, pipe_config);
10990 
10991 	if (IS_HASWELL(dev_priv)) {
10992 		u32 tmp = intel_de_read(dev_priv,
10993 					PIPECONF(pipe_config->cpu_transcoder));
10994 
10995 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
10996 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10997 		else
10998 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10999 	} else {
11000 		pipe_config->output_format =
11001 			bdw_get_pipemisc_output_format(crtc);
11002 	}
11003 
11004 	pipe_config->gamma_mode = intel_de_read(dev_priv,
11005 						GAMMA_MODE(crtc->pipe));
11006 
11007 	pipe_config->csc_mode = intel_de_read(dev_priv,
11008 					      PIPE_CSC_MODE(crtc->pipe));
11009 
11010 	if (INTEL_GEN(dev_priv) >= 9) {
11011 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
11012 
11013 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
11014 			pipe_config->gamma_enable = true;
11015 
11016 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
11017 			pipe_config->csc_enable = true;
11018 	} else {
11019 		i9xx_get_pipe_color_config(pipe_config);
11020 	}
11021 
11022 	intel_color_get_config(pipe_config);
11023 
11024 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
11025 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
11026 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
11027 		pipe_config->ips_linetime =
11028 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
11029 
11030 	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
11031 						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
11032 		if (INTEL_GEN(dev_priv) >= 9)
11033 			skl_get_pfit_config(pipe_config);
11034 		else
11035 			ilk_get_pfit_config(pipe_config);
11036 	}
11037 
11038 	if (hsw_crtc_supports_ips(crtc)) {
11039 		if (IS_HASWELL(dev_priv))
11040 			pipe_config->ips_enabled = intel_de_read(dev_priv,
11041 								 IPS_CTL) & IPS_ENABLE;
11042 		else {
11043 			/*
11044 			 * We cannot readout IPS state on broadwell, set to
11045 			 * true so we can set it to a defined state on first
11046 			 * commit.
11047 			 */
11048 			pipe_config->ips_enabled = true;
11049 		}
11050 	}
11051 
11052 	if (pipe_config->bigjoiner_slave) {
11053 		/* Cannot be read out as a slave, set to 0. */
11054 		pipe_config->pixel_multiplier = 0;
11055 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
11056 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
11057 		pipe_config->pixel_multiplier =
11058 			intel_de_read(dev_priv,
11059 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
11060 	} else {
11061 		pipe_config->pixel_multiplier = 1;
11062 	}
11063 
11064 out:
11065 	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
11066 
11067 	return active;
11068 }
11069 
11070 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
11071 {
11072 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11073 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
11074 
11075 	if (!i915->display.get_pipe_config(crtc, crtc_state))
11076 		return false;
11077 
11078 	crtc_state->hw.active = true;
11079 
11080 	intel_crtc_readout_derived_state(crtc_state);
11081 
11082 	return true;
11083 }
11084 
/*
 * VESA 640x480x72Hz mode to set on the pipe during load detection
 * (see intel_get_load_detect_pipe()).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11090 
11091 struct drm_framebuffer *
11092 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11093 			 struct drm_mode_fb_cmd2 *mode_cmd)
11094 {
11095 	struct intel_framebuffer *intel_fb;
11096 	int ret;
11097 
11098 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11099 	if (!intel_fb)
11100 		return ERR_PTR(-ENOMEM);
11101 
11102 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11103 	if (ret)
11104 		goto err;
11105 
11106 	return &intel_fb->base;
11107 
11108 err:
11109 	kfree(intel_fb);
11110 	return ERR_PTR(ret);
11111 }
11112 
11113 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11114 					struct drm_crtc *crtc)
11115 {
11116 	struct drm_plane *plane;
11117 	struct drm_plane_state *plane_state;
11118 	int ret, i;
11119 
11120 	ret = drm_atomic_add_affected_planes(state, crtc);
11121 	if (ret)
11122 		return ret;
11123 
11124 	for_each_new_plane_in_state(state, plane, plane_state, i) {
11125 		if (plane_state->crtc != crtc)
11126 			continue;
11127 
11128 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11129 		if (ret)
11130 			return ret;
11131 
11132 		drm_atomic_set_fb_for_plane(plane_state, NULL);
11133 	}
11134 
11135 	return 0;
11136 }
11137 
11138 int intel_get_load_detect_pipe(struct drm_connector *connector,
11139 			       struct intel_load_detect_pipe *old,
11140 			       struct drm_modeset_acquire_ctx *ctx)
11141 {
11142 	struct intel_crtc *intel_crtc;
11143 	struct intel_encoder *intel_encoder =
11144 		intel_attached_encoder(to_intel_connector(connector));
11145 	struct drm_crtc *possible_crtc;
11146 	struct drm_encoder *encoder = &intel_encoder->base;
11147 	struct drm_crtc *crtc = NULL;
11148 	struct drm_device *dev = encoder->dev;
11149 	struct drm_i915_private *dev_priv = to_i915(dev);
11150 	struct drm_mode_config *config = &dev->mode_config;
11151 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
11152 	struct drm_connector_state *connector_state;
11153 	struct intel_crtc_state *crtc_state;
11154 	int ret, i = -1;
11155 
11156 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11157 		    connector->base.id, connector->name,
11158 		    encoder->base.id, encoder->name);
11159 
11160 	old->restore_state = NULL;
11161 
11162 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
11163 
11164 	/*
11165 	 * Algorithm gets a little messy:
11166 	 *
11167 	 *   - if the connector already has an assigned crtc, use it (but make
11168 	 *     sure it's on first)
11169 	 *
11170 	 *   - try to find the first unused crtc that can drive this connector,
11171 	 *     and use that if we find one
11172 	 */
11173 
11174 	/* See if we already have a CRTC for this connector */
11175 	if (connector->state->crtc) {
11176 		crtc = connector->state->crtc;
11177 
11178 		ret = drm_modeset_lock(&crtc->mutex, ctx);
11179 		if (ret)
11180 			goto fail;
11181 
11182 		/* Make sure the crtc and connector are running */
11183 		goto found;
11184 	}
11185 
11186 	/* Find an unused one (if possible) */
11187 	for_each_crtc(dev, possible_crtc) {
11188 		i++;
11189 		if (!(encoder->possible_crtcs & (1 << i)))
11190 			continue;
11191 
11192 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
11193 		if (ret)
11194 			goto fail;
11195 
11196 		if (possible_crtc->state->enable) {
11197 			drm_modeset_unlock(&possible_crtc->mutex);
11198 			continue;
11199 		}
11200 
11201 		crtc = possible_crtc;
11202 		break;
11203 	}
11204 
11205 	/*
11206 	 * If we didn't find an unused CRTC, don't use any.
11207 	 */
11208 	if (!crtc) {
11209 		drm_dbg_kms(&dev_priv->drm,
11210 			    "no pipe available for load-detect\n");
11211 		ret = -ENODEV;
11212 		goto fail;
11213 	}
11214 
11215 found:
11216 	intel_crtc = to_intel_crtc(crtc);
11217 
11218 	state = drm_atomic_state_alloc(dev);
11219 	restore_state = drm_atomic_state_alloc(dev);
11220 	if (!state || !restore_state) {
11221 		ret = -ENOMEM;
11222 		goto fail;
11223 	}
11224 
11225 	state->acquire_ctx = ctx;
11226 	restore_state->acquire_ctx = ctx;
11227 
11228 	connector_state = drm_atomic_get_connector_state(state, connector);
11229 	if (IS_ERR(connector_state)) {
11230 		ret = PTR_ERR(connector_state);
11231 		goto fail;
11232 	}
11233 
11234 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
11235 	if (ret)
11236 		goto fail;
11237 
11238 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
11239 	if (IS_ERR(crtc_state)) {
11240 		ret = PTR_ERR(crtc_state);
11241 		goto fail;
11242 	}
11243 
11244 	crtc_state->uapi.active = true;
11245 
11246 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
11247 					   &load_detect_mode);
11248 	if (ret)
11249 		goto fail;
11250 
11251 	ret = intel_modeset_disable_planes(state, crtc);
11252 	if (ret)
11253 		goto fail;
11254 
11255 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
11256 	if (!ret)
11257 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
11258 	if (!ret)
11259 		ret = drm_atomic_add_affected_planes(restore_state, crtc);
11260 	if (ret) {
11261 		drm_dbg_kms(&dev_priv->drm,
11262 			    "Failed to create a copy of old state to restore: %i\n",
11263 			    ret);
11264 		goto fail;
11265 	}
11266 
11267 	ret = drm_atomic_commit(state);
11268 	if (ret) {
11269 		drm_dbg_kms(&dev_priv->drm,
11270 			    "failed to set mode on load-detect pipe\n");
11271 		goto fail;
11272 	}
11273 
11274 	old->restore_state = restore_state;
11275 	drm_atomic_state_put(state);
11276 
11277 	/* let the connector get through one full cycle before testing */
11278 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
11279 	return true;
11280 
11281 fail:
11282 	if (state) {
11283 		drm_atomic_state_put(state);
11284 		state = NULL;
11285 	}
11286 	if (restore_state) {
11287 		drm_atomic_state_put(restore_state);
11288 		restore_state = NULL;
11289 	}
11290 
11291 	if (ret == -EDEADLK)
11292 		return ret;
11293 
11294 	return false;
11295 }
11296 
11297 void intel_release_load_detect_pipe(struct drm_connector *connector,
11298 				    struct intel_load_detect_pipe *old,
11299 				    struct drm_modeset_acquire_ctx *ctx)
11300 {
11301 	struct intel_encoder *intel_encoder =
11302 		intel_attached_encoder(to_intel_connector(connector));
11303 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
11304 	struct drm_encoder *encoder = &intel_encoder->base;
11305 	struct drm_atomic_state *state = old->restore_state;
11306 	int ret;
11307 
11308 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11309 		    connector->base.id, connector->name,
11310 		    encoder->base.id, encoder->name);
11311 
11312 	if (!state)
11313 		return;
11314 
11315 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11316 	if (ret)
11317 		drm_dbg_kms(&i915->drm,
11318 			    "Couldn't release load detect pipe: %i\n", ret);
11319 	drm_atomic_state_put(state);
11320 }
11321 
11322 static int i9xx_pll_refclk(struct drm_device *dev,
11323 			   const struct intel_crtc_state *pipe_config)
11324 {
11325 	struct drm_i915_private *dev_priv = to_i915(dev);
11326 	u32 dpll = pipe_config->dpll_hw_state.dpll;
11327 
11328 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11329 		return dev_priv->vbt.lvds_ssc_freq;
11330 	else if (HAS_PCH_SPLIT(dev_priv))
11331 		return 120000;
11332 	else if (!IS_GEN(dev_priv, 2))
11333 		return 96000;
11334 	else
11335 		return 48000;
11336 }
11337 
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP divisor register currently selected by the DPLL. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M1/N/M2 from FP; Pineview uses a different field layout. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is a one-hot bitfield; ffs() recovers the divisor value. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL output mode (DAC serial vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: the P1/P2 encoding differs when LVDS drives this
		 * pipe; I830 has no LVDS register to read. */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11429 
11430 int intel_dotclock_calculate(int link_freq,
11431 			     const struct intel_link_m_n *m_n)
11432 {
11433 	/*
11434 	 * The calculation for the data clock is:
11435 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11436 	 * But we want to avoid losing precison if possible, so:
11437 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11438 	 *
11439 	 * and the link clock is simpler:
11440 	 * link_clock = (m * link_clock) / n
11441 	 */
11442 
11443 	if (!m_n->link_n)
11444 		return 0;
11445 
11446 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11447 }
11448 
/* Read out port_clock and a derived dotclock for a PCH-connected pipe. */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
11466 
11467 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
11468 				   struct intel_crtc *crtc)
11469 {
11470 	memset(crtc_state, 0, sizeof(*crtc_state));
11471 
11472 	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
11473 
11474 	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
11475 	crtc_state->master_transcoder = INVALID_TRANSCODER;
11476 	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
11477 	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
11478 	crtc_state->scaler_state.scaler_id = -1;
11479 	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
11480 }
11481 
11482 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
11483 {
11484 	struct intel_crtc_state *crtc_state;
11485 
11486 	crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
11487 
11488 	if (crtc_state)
11489 		intel_crtc_state_reset(crtc_state, crtc);
11490 
11491 	return crtc_state;
11492 }
11493 
11494 /* Returns the currently programmed mode of the given encoder. */
11495 struct drm_display_mode *
11496 intel_encoder_current_mode(struct intel_encoder *encoder)
11497 {
11498 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11499 	struct intel_crtc_state *crtc_state;
11500 	struct drm_display_mode *mode;
11501 	struct intel_crtc *crtc;
11502 	enum pipe pipe;
11503 
11504 	if (!encoder->get_hw_state(encoder, &pipe))
11505 		return NULL;
11506 
11507 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11508 
11509 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11510 	if (!mode)
11511 		return NULL;
11512 
11513 	crtc_state = intel_crtc_state_alloc(crtc);
11514 	if (!crtc_state) {
11515 		kfree(mode);
11516 		return NULL;
11517 	}
11518 
11519 	if (!intel_crtc_get_pipe_config(crtc_state)) {
11520 		kfree(crtc_state);
11521 		kfree(mode);
11522 		return NULL;
11523 	}
11524 
11525 	intel_encoder_get_config(encoder, crtc_state);
11526 
11527 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
11528 
11529 	kfree(crtc_state);
11530 
11531 	return mode;
11532 }
11533 
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	/* Release the DRM core bookkeeping first, then free the
	 * embedding intel_crtc allocation. */
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
11541 
11542 /**
11543  * intel_wm_need_update - Check whether watermarks need updating
11544  * @cur: current plane state
11545  * @new: new plane state
11546  *
11547  * Check current plane state versus the new one to determine whether
11548  * watermarks need to be recalculated.
11549  *
11550  * Returns true or false.
11551  */
11552 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11553 				 struct intel_plane_state *new)
11554 {
11555 	/* Update watermarks on tiling or size changes. */
11556 	if (new->uapi.visible != cur->uapi.visible)
11557 		return true;
11558 
11559 	if (!cur->hw.fb || !new->hw.fb)
11560 		return false;
11561 
11562 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
11563 	    cur->hw.rotation != new->hw.rotation ||
11564 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
11565 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
11566 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
11567 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
11568 		return true;
11569 
11570 	return false;
11571 }
11572 
11573 static bool needs_scaling(const struct intel_plane_state *state)
11574 {
11575 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
11576 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
11577 	int dst_w = drm_rect_width(&state->uapi.dst);
11578 	int dst_h = drm_rect_height(&state->uapi.dst);
11579 
11580 	return (src_w != dst_w || src_h != dst_h);
11581 }
11582 
/*
 * intel_plane_atomic_calc_changes - compute derived crtc flags for a plane update
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state; update_wm_pre/post, disable_cxsr, fb_bits
 *	and disable_lp_wm are accumulated into it
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state
 *
 * Works out whether the plane is being turned on/off (or resized) and
 * which watermark/self-refresh related flags must be set on the crtc as
 * a consequence. Returns 0 on success or a negative error code from
 * scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* gen9+: non-cursor planes may need a pipe scaler assigned. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive crtc. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	/* Plane stays invisible: nothing more to derive. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
11704 
11705 static bool encoders_cloneable(const struct intel_encoder *a,
11706 			       const struct intel_encoder *b)
11707 {
11708 	/* masks could be asymmetric, so check both ways */
11709 	return a == b || (a->cloneable & (1 << b->type) &&
11710 			  b->cloneable & (1 << a->type));
11711 }
11712 
11713 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
11714 					 struct intel_crtc *crtc,
11715 					 struct intel_encoder *encoder)
11716 {
11717 	struct intel_encoder *source_encoder;
11718 	struct drm_connector *connector;
11719 	struct drm_connector_state *connector_state;
11720 	int i;
11721 
11722 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
11723 		if (connector_state->crtc != &crtc->base)
11724 			continue;
11725 
11726 		source_encoder =
11727 			to_intel_encoder(connector_state->best_encoder);
11728 		if (!encoders_cloneable(encoder, source_encoder))
11729 			return false;
11730 	}
11731 
11732 	return true;
11733 }
11734 
11735 static int icl_add_linked_planes(struct intel_atomic_state *state)
11736 {
11737 	struct intel_plane *plane, *linked;
11738 	struct intel_plane_state *plane_state, *linked_plane_state;
11739 	int i;
11740 
11741 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11742 		linked = plane_state->planar_linked_plane;
11743 
11744 		if (!linked)
11745 			continue;
11746 
11747 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
11748 		if (IS_ERR(linked_plane_state))
11749 			return PTR_ERR(linked_plane_state);
11750 
11751 		drm_WARN_ON(state->base.dev,
11752 			    linked_plane_state->planar_linked_plane != plane);
11753 		drm_WARN_ON(state->base.dev,
11754 			    linked_plane_state->planar_slave == plane_state->planar_slave);
11755 	}
11756 
11757 	return 0;
11758 }
11759 
/*
 * icl_check_nv12_planes - (re)assign Y planes for planar YUV on gen11+
 *
 * Tear down all stale plane links on this crtc first, then pick a free
 * Y-capable plane for every plane in crtc_state->nv12_planes and copy
 * the relevant parameters to it. Returns 0 on success, -EINVAL if no
 * Y plane is available, or an error from adding a plane to the state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar planes on this crtc: nothing to link up. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find the first Y-capable plane that is not in use. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* NOTE(review): cus_ctl appears to select the chosen
			 * slave plane for the HDR plane — confirm vs bspec. */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
11859 
11860 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11861 {
11862 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
11863 	struct intel_atomic_state *state =
11864 		to_intel_atomic_state(new_crtc_state->uapi.state);
11865 	const struct intel_crtc_state *old_crtc_state =
11866 		intel_atomic_get_old_crtc_state(state, crtc);
11867 
11868 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11869 }
11870 
11871 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
11872 {
11873 	const struct drm_display_mode *pipe_mode =
11874 		&crtc_state->hw.pipe_mode;
11875 	int linetime_wm;
11876 
11877 	if (!crtc_state->hw.enable)
11878 		return 0;
11879 
11880 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
11881 					pipe_mode->crtc_clock);
11882 
11883 	return min(linetime_wm, 0x1ff);
11884 }
11885 
11886 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
11887 			       const struct intel_cdclk_state *cdclk_state)
11888 {
11889 	const struct drm_display_mode *pipe_mode =
11890 		&crtc_state->hw.pipe_mode;
11891 	int linetime_wm;
11892 
11893 	if (!crtc_state->hw.enable)
11894 		return 0;
11895 
11896 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
11897 					cdclk_state->logical.cdclk);
11898 
11899 	return min(linetime_wm, 0x1ff);
11900 }
11901 
11902 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
11903 {
11904 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11905 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11906 	const struct drm_display_mode *pipe_mode =
11907 		&crtc_state->hw.pipe_mode;
11908 	int linetime_wm;
11909 
11910 	if (!crtc_state->hw.enable)
11911 		return 0;
11912 
11913 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
11914 				   crtc_state->pixel_rate);
11915 
11916 	/* Display WA #1135: BXT:ALL GLK:ALL */
11917 	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
11918 		linetime_wm /= 2;
11919 
11920 	return min(linetime_wm, 0x1ff);
11921 }
11922 
11923 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
11924 				   struct intel_crtc *crtc)
11925 {
11926 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11927 	struct intel_crtc_state *crtc_state =
11928 		intel_atomic_get_new_crtc_state(state, crtc);
11929 	const struct intel_cdclk_state *cdclk_state;
11930 
11931 	if (INTEL_GEN(dev_priv) >= 9)
11932 		crtc_state->linetime = skl_linetime_wm(crtc_state);
11933 	else
11934 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
11935 
11936 	if (!hsw_crtc_supports_ips(crtc))
11937 		return 0;
11938 
11939 	cdclk_state = intel_atomic_get_cdclk_state(state);
11940 	if (IS_ERR(cdclk_state))
11941 		return PTR_ERR(cdclk_state);
11942 
11943 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
11944 						       cdclk_state);
11945 
11946 	return 0;
11947 }
11948 
/*
 * Per-crtc atomic check: computes clock state, color management state,
 * watermarks, scalers, IPS and PSR2 selective fetch state for @crtc.
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (minus g4x): force a post-update wm recompute when a
	 * modeset deactivates the pipe. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* Compute a new clock config; bigjoiner slaves are skipped, and a
	 * shared DPLL must not already be assigned at this point. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !crtc_state->bigjoiner_slave &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* gen9+: recompute the crtc scaler and (re)assign pipe scalers. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	/* PSR2 selective fetch state is only refreshed on non-modesets. */
	if (!mode_changed) {
		ret = intel_psr2_sel_fetch_update(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}
12046 
/*
 * Sync each connector's atomic state (crtc/best_encoder) with the legacy
 * connector->encoder->crtc pointers, adjusting the connector reference
 * held for the state->crtc assignment along the way.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference that was held for the old crtc link. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Hold a reference while state->crtc is non-NULL. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12071 
12072 static int
12073 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12074 		      struct intel_crtc_state *pipe_config)
12075 {
12076 	struct drm_connector *connector = conn_state->connector;
12077 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12078 	const struct drm_display_info *info = &connector->display_info;
12079 	int bpp;
12080 
12081 	switch (conn_state->max_bpc) {
12082 	case 6 ... 7:
12083 		bpp = 6 * 3;
12084 		break;
12085 	case 8 ... 9:
12086 		bpp = 8 * 3;
12087 		break;
12088 	case 10 ... 11:
12089 		bpp = 10 * 3;
12090 		break;
12091 	case 12 ... 16:
12092 		bpp = 12 * 3;
12093 		break;
12094 	default:
12095 		MISSING_CASE(conn_state->max_bpc);
12096 		return -EINVAL;
12097 	}
12098 
12099 	if (bpp < pipe_config->pipe_bpp) {
12100 		drm_dbg_kms(&i915->drm,
12101 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12102 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12103 			    connector->base.id, connector->name,
12104 			    bpp, 3 * info->bpc,
12105 			    3 * conn_state->max_requested_bpc,
12106 			    pipe_config->pipe_bpp);
12107 
12108 		pipe_config->pipe_bpp = bpp;
12109 	}
12110 
12111 	return 0;
12112 }
12113 
12114 static int
12115 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12116 			  struct intel_crtc_state *pipe_config)
12117 {
12118 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12119 	struct drm_atomic_state *state = pipe_config->uapi.state;
12120 	struct drm_connector *connector;
12121 	struct drm_connector_state *connector_state;
12122 	int bpp, i;
12123 
12124 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12125 	    IS_CHERRYVIEW(dev_priv)))
12126 		bpp = 10*3;
12127 	else if (INTEL_GEN(dev_priv) >= 5)
12128 		bpp = 12*3;
12129 	else
12130 		bpp = 8*3;
12131 
12132 	pipe_config->pipe_bpp = bpp;
12133 
12134 	/* Clamp display bpp to connector max bpp */
12135 	for_each_new_connector_in_state(state, connector, connector_state, i) {
12136 		int ret;
12137 
12138 		if (connector_state->crtc != &crtc->base)
12139 			continue;
12140 
12141 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12142 		if (ret)
12143 			return ret;
12144 	}
12145 
12146 	return 0;
12147 }
12148 
12149 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
12150 				    const struct drm_display_mode *mode)
12151 {
12152 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
12153 		    "type: 0x%x flags: 0x%x\n",
12154 		    mode->crtc_clock,
12155 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
12156 		    mode->crtc_hsync_end, mode->crtc_htotal,
12157 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
12158 		    mode->crtc_vsync_end, mode->crtc_vtotal,
12159 		    mode->type, mode->flags);
12160 }
12161 
12162 static void
12163 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
12164 		      const char *id, unsigned int lane_count,
12165 		      const struct intel_link_m_n *m_n)
12166 {
12167 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12168 
12169 	drm_dbg_kms(&i915->drm,
12170 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12171 		    id, lane_count,
12172 		    m_n->gmch_m, m_n->gmch_n,
12173 		    m_n->link_m, m_n->link_n, m_n->tu);
12174 }
12175 
12176 static void
12177 intel_dump_infoframe(struct drm_i915_private *dev_priv,
12178 		     const union hdmi_infoframe *frame)
12179 {
12180 	if (!drm_debug_enabled(DRM_UT_KMS))
12181 		return;
12182 
12183 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
12184 }
12185 
12186 static void
12187 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
12188 		      const struct drm_dp_vsc_sdp *vsc)
12189 {
12190 	if (!drm_debug_enabled(DRM_UT_KMS))
12191 		return;
12192 
12193 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
12194 }
12195 
/* Expands to a designated initializer: [INTEL_OUTPUT_FOO] = "FOO". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/*
 * Names for the bits of a crtc state's output_types bitmask, indexed by
 * output type; consumed by snprintf_output_types() below.
 */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12214 
12215 static void snprintf_output_types(char *buf, size_t len,
12216 				  unsigned int output_types)
12217 {
12218 	char *str = buf;
12219 	int i;
12220 
12221 	str[0] = '\0';
12222 
12223 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12224 		int r;
12225 
12226 		if ((output_types & BIT(i)) == 0)
12227 			continue;
12228 
12229 		r = snprintf(str, len, "%s%s",
12230 			     str != buf ? "," : "", output_type_str[i]);
12231 		if (r >= len)
12232 			break;
12233 		str += r;
12234 		len -= r;
12235 
12236 		output_types &= ~BIT(i);
12237 	}
12238 
12239 	WARN_ON_ONCE(output_types != 0);
12240 }
12241 
/* Human-readable names for enum intel_output_format values. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12248 
12249 static const char *output_formats(enum intel_output_format format)
12250 {
12251 	if (format >= ARRAY_SIZE(output_format_str))
12252 		format = INTEL_OUTPUT_FORMAT_INVALID;
12253 	return output_format_str[format];
12254 }
12255 
12256 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
12257 {
12258 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
12259 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
12260 	const struct drm_framebuffer *fb = plane_state->hw.fb;
12261 	struct drm_format_name_buf format_name;
12262 
12263 	if (!fb) {
12264 		drm_dbg_kms(&i915->drm,
12265 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
12266 			    plane->base.base.id, plane->base.name,
12267 			    yesno(plane_state->uapi.visible));
12268 		return;
12269 	}
12270 
12271 	drm_dbg_kms(&i915->drm,
12272 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
12273 		    plane->base.base.id, plane->base.name,
12274 		    fb->base.id, fb->width, fb->height,
12275 		    drm_get_format_name(fb->format->format, &format_name),
12276 		    fb->modifier, yesno(plane_state->uapi.visible));
12277 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
12278 		    plane_state->hw.rotation, plane_state->scaler_id);
12279 	if (plane_state->uapi.visible)
12280 		drm_dbg_kms(&i915->drm,
12281 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
12282 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
12283 			    DRM_RECT_ARG(&plane_state->uapi.dst));
12284 }
12285 
/*
 * Dump the full contents of a crtc state to the kernel log for debugging.
 *
 * @pipe_config: the crtc state to dump
 * @state: the atomic state the crtc belongs to, or NULL; when non-NULL the
 *	states of all planes on this crtc's pipe are dumped as well
 * @context: free-form string describing why the dump is being made
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled crtc has no interesting state beyond its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 holds the alternate (downclocked) DRRS link config. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): both the DRM infoframe and the gamut metadata packet
	 * dump infoframes.drm — presumably the DRM infoframe payload is
	 * carried in the gamut metadata packet on some platforms; confirm
	 * this is intentional rather than a copy/paste slip.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have a panel fitter; PCH platforms a pipe pfit. */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a cgm_mode instead of a csc_mode. */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
12438 
/*
 * Verify that no digital port is claimed by more than one encoder and
 * that MST and SST/HDMI are not mixed on the same port. Returns true if
 * the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from this commit, else the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			/* MST ports may legitimately appear multiple times. */
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
12507 
12508 static void
12509 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
12510 					   struct intel_crtc_state *crtc_state)
12511 {
12512 	const struct intel_crtc_state *from_crtc_state = crtc_state;
12513 
12514 	if (crtc_state->bigjoiner_slave) {
12515 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
12516 								  crtc_state->bigjoiner_linked_crtc);
12517 
12518 		/* No need to copy state if the master state is unchanged */
12519 		if (!from_crtc_state)
12520 			return;
12521 	}
12522 
12523 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
12524 }
12525 
/*
 * Seed the hw state (what gets programmed) from the uapi state (what
 * userspace requested) ahead of a full modeset computation.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	/* Color blobs may come from the bigjoiner master instead. */
	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
12538 
/*
 * Sync the userspace-visible uapi state back from the hw state, e.g.
 * after hardware readout. Bigjoiner slaves are skipped: their uapi
 * state is owned by the master crtc.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* drm_atomic_set_mode_for_crtc() only fails on allocation errors. */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
12560 
/*
 * Turn @crtc_state into a bigjoiner slave state by copying the master's
 * state (@from_crtc_state) over it, while preserving this crtc's own
 * uapi state, dpll and scaler assignments. Returns 0 or -ENOMEM.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/* Stage the copy in a temporary so crtc-local fields can be re-applied. */
	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* These belong to this crtc, not the master; put them back. */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* Drop blob references before they are overwritten by memcpy(). */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}
12601 
/*
 * Reset @crtc_state to a freshly-allocated default state ahead of a full
 * modeset computation, preserving only the fields that must survive
 * (uapi state, dpll/scaler assignments, crc, and GMCH watermarks).
 * Returns 0 or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* GMCH platforms compute watermarks up front; keep them. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
12640 
/*
 * Compute a complete pipe configuration for a modeset: baseline bpp,
 * pipe dimensions, and every attached encoder's adjustments, retrying
 * once if the crtc fixup asks for it (RETRY). Returns 0, -EDEADLK for
 * locking backoff, or a negative error code.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-encoder bpp for the final debug message. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is normal backoff, not worth logging. */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* A single RETRY pass lets encoders adapt to reduced bandwidth. */
	if (ret == RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
12779 
12780 static int
12781 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
12782 {
12783 	struct intel_atomic_state *state =
12784 		to_intel_atomic_state(crtc_state->uapi.state);
12785 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12786 	struct drm_connector_state *conn_state;
12787 	struct drm_connector *connector;
12788 	int i;
12789 
12790 	for_each_new_connector_in_state(&state->base, connector,
12791 					conn_state, i) {
12792 		struct intel_encoder *encoder =
12793 			to_intel_encoder(conn_state->best_encoder);
12794 		int ret;
12795 
12796 		if (conn_state->crtc != &crtc->base ||
12797 		    !encoder->compute_config_late)
12798 			continue;
12799 
12800 		ret = encoder->compute_config_late(encoder, crtc_state,
12801 						   conn_state);
12802 		if (ret)
12803 			return ret;
12804 	}
12805 
12806 	return 0;
12807 }
12808 
/*
 * Compare two clocks (in kHz) allowing roughly 5% of slack: equal clocks
 * always match, a zero clock only matches another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	return (delta + sum) * 100 / sum < 105;
}
12826 
12827 static bool
12828 intel_compare_m_n(unsigned int m, unsigned int n,
12829 		  unsigned int m2, unsigned int n2,
12830 		  bool exact)
12831 {
12832 	if (m == m2 && n == n2)
12833 		return true;
12834 
12835 	if (exact || !m || !n || !m2 || !n2)
12836 		return false;
12837 
12838 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12839 
12840 	if (n > n2) {
12841 		while (n > n2) {
12842 			m2 <<= 1;
12843 			n2 <<= 1;
12844 		}
12845 	} else if (n < n2) {
12846 		while (n < n2) {
12847 			m <<= 1;
12848 			n <<= 1;
12849 		}
12850 	}
12851 
12852 	if (n != n2)
12853 		return false;
12854 
12855 	return intel_fuzzy_clock_check(m, m2);
12856 }
12857 
12858 static bool
12859 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12860 		       const struct intel_link_m_n *m2_n2,
12861 		       bool exact)
12862 {
12863 	return m_n->tu == m2_n2->tu &&
12864 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12865 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12866 		intel_compare_m_n(m_n->link_m, m_n->link_n,
12867 				  m2_n2->link_m, m2_n2->link_n, exact);
12868 }
12869 
12870 static bool
12871 intel_compare_infoframe(const union hdmi_infoframe *a,
12872 			const union hdmi_infoframe *b)
12873 {
12874 	return memcmp(a, b, sizeof(*a)) == 0;
12875 }
12876 
12877 static bool
12878 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
12879 			 const struct drm_dp_vsc_sdp *b)
12880 {
12881 	return memcmp(a, b, sizeof(*a)) == 0;
12882 }
12883 
12884 static void
12885 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12886 			       bool fastset, const char *name,
12887 			       const union hdmi_infoframe *a,
12888 			       const union hdmi_infoframe *b)
12889 {
12890 	if (fastset) {
12891 		if (!drm_debug_enabled(DRM_UT_KMS))
12892 			return;
12893 
12894 		drm_dbg_kms(&dev_priv->drm,
12895 			    "fastset mismatch in %s infoframe\n", name);
12896 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
12897 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12898 		drm_dbg_kms(&dev_priv->drm, "found:\n");
12899 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12900 	} else {
12901 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
12902 		drm_err(&dev_priv->drm, "expected:\n");
12903 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12904 		drm_err(&dev_priv->drm, "found:\n");
12905 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12906 	}
12907 }
12908 
12909 static void
12910 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
12911 				bool fastset, const char *name,
12912 				const struct drm_dp_vsc_sdp *a,
12913 				const struct drm_dp_vsc_sdp *b)
12914 {
12915 	if (fastset) {
12916 		if (!drm_debug_enabled(DRM_UT_KMS))
12917 			return;
12918 
12919 		drm_dbg_kms(&dev_priv->drm,
12920 			    "fastset mismatch in %s dp sdp\n", name);
12921 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
12922 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
12923 		drm_dbg_kms(&dev_priv->drm, "found:\n");
12924 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
12925 	} else {
12926 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
12927 		drm_err(&dev_priv->drm, "expected:\n");
12928 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
12929 		drm_err(&dev_priv->drm, "found:\n");
12930 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
12931 	}
12932 }
12933 
12934 static void __printf(4, 5)
12935 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
12936 		     const char *name, const char *format, ...)
12937 {
12938 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
12939 	struct va_format vaf;
12940 	va_list args;
12941 
12942 	va_start(args, format);
12943 	vaf.fmt = format;
12944 	vaf.va = &args;
12945 
12946 	if (fastset)
12947 		drm_dbg_kms(&i915->drm,
12948 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
12949 			    crtc->base.base.id, crtc->base.name, name, &vaf);
12950 	else
12951 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
12952 			crtc->base.base.id, crtc->base.name, name, &vaf);
12953 
12954 	va_end(args);
12955 }
12956 
12957 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12958 {
12959 	if (dev_priv->params.fastboot != -1)
12960 		return dev_priv->params.fastboot;
12961 
12962 	/* Enable fastboot by default on Skylake and newer */
12963 	if (INTEL_GEN(dev_priv) >= 9)
12964 		return true;
12965 
12966 	/* Enable fastboot by default on VLV and CHV */
12967 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12968 		return true;
12969 
12970 	/* Disabled by default on all others */
12971 	return false;
12972 }
12973 
/*
 * Compare a sw-computed crtc state (@current_config) against a state read
 * back from the hardware (@pipe_config).  Returns true iff they match.
 *
 * @fastset is forwarded to the mismatch loggers and relaxes some of the
 * comparisons (e.g. link m/n values are compared non-exactly when set);
 * it selects the check used to decide whether a fastset (update without
 * a full modeset) is possible.  Every mismatch clears 'ret' instead of
 * returning early, so all differences get reported in one pass.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	/*
	 * True when fastsetting a state that is still marked inherited
	 * (taken over at boot) and hasn't been fully recomputed yet; such
	 * state can't be compared exactly (see the INCOMPLETE macro below).
	 */
	bool fixup_inherited = fastset &&
		current_config->inherited && !pipe_config->inherited;

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm,
			    "initial modeset and fastboot not set\n");
		ret = false;
	}

/*
 * Comparison helpers: each checks one field (or pair of fields) of
 * current_config vs pipe_config, logs any difference via
 * pipe_config_mismatch() and clears 'ret'.  They deliberately don't
 * return, so that every mismatch is reported.
 */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/* PSR mangles the VSC SDP, so only compare it when PSR is off on both sides. */
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!current_config->has_psr && !pipe_config->has_psr && \
	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

/*
 * Compare the LUT contents (name2) only if the mode field (name1) already
 * matches, and only with the bit precision the hw actually stores.
 */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

/* True if either state carries the given quirk flag. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	/* The actual comparison starts here. */
	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* FIXME do the readout properly and get rid of this quirk */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);

		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);

		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

		PIPE_CONF_CHECK_I(pixel_multiplier);

		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_INTERLACE);

		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_PHSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_NHSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_PVSYNC);
			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
					      DRM_MODE_FLAG_NVSYNC);
		}
	}

	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	/* FIXME do the readout properly and get rid of this quirk */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
		PIPE_CONF_CHECK_BOOL(fec_enable);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	/* These checks are skipped when deciding fastset eligibility. */
	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		/* FIXME do the readout properly and get rid of this quirk */
		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		/* Only compare the gamma LUT when hw stores one (bp_gamma != 0). */
		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);

	/* FIXME do the readout properly and get rid of this quirk */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
		PIPE_CONF_CHECK_X(dsi_pll.div);

		if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
			PIPE_CONF_CHECK_I(pipe_bpp);

		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

		PIPE_CONF_CHECK_I(min_voltage_level);
	}

	PIPE_CONF_CHECK_X(infoframes.enable);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);
	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);
	PIPE_CONF_CHECK_BOOL(bigjoiner);
	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);

	PIPE_CONF_CHECK_I(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.dsc_split);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp);

	PIPE_CONF_CHECK_I(mst_master_transcoder);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
13375 
13376 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13377 					   const struct intel_crtc_state *pipe_config)
13378 {
13379 	if (pipe_config->has_pch_encoder) {
13380 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13381 							    &pipe_config->fdi_m_n);
13382 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13383 
13384 		/*
13385 		 * FDI already provided one idea for the dotclock.
13386 		 * Yell if the encoder disagrees.
13387 		 */
13388 		drm_WARN(&dev_priv->drm,
13389 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13390 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13391 			 fdi_dotclock, dotclock);
13392 	}
13393 }
13394 
13395 static void verify_wm_state(struct intel_crtc *crtc,
13396 			    struct intel_crtc_state *new_crtc_state)
13397 {
13398 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13399 	struct skl_hw_state {
13400 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
13401 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
13402 		struct skl_pipe_wm wm;
13403 	} *hw;
13404 	struct skl_pipe_wm *sw_wm;
13405 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
13406 	u8 hw_enabled_slices;
13407 	const enum pipe pipe = crtc->pipe;
13408 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
13409 
13410 	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
13411 		return;
13412 
13413 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
13414 	if (!hw)
13415 		return;
13416 
13417 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
13418 	sw_wm = &new_crtc_state->wm.skl.optimal;
13419 
13420 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
13421 
13422 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
13423 
13424 	if (INTEL_GEN(dev_priv) >= 11 &&
13425 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
13426 		drm_err(&dev_priv->drm,
13427 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
13428 			dev_priv->dbuf.enabled_slices,
13429 			hw_enabled_slices);
13430 
13431 	/* planes */
13432 	for_each_universal_plane(dev_priv, pipe, plane) {
13433 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13434 
13435 		hw_plane_wm = &hw->wm.planes[plane];
13436 		sw_plane_wm = &sw_wm->planes[plane];
13437 
13438 		/* Watermarks */
13439 		for (level = 0; level <= max_level; level++) {
13440 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13441 						&sw_plane_wm->wm[level]) ||
13442 			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
13443 							       &sw_plane_wm->sagv_wm0)))
13444 				continue;
13445 
13446 			drm_err(&dev_priv->drm,
13447 				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13448 				pipe_name(pipe), plane + 1, level,
13449 				sw_plane_wm->wm[level].plane_en,
13450 				sw_plane_wm->wm[level].plane_res_b,
13451 				sw_plane_wm->wm[level].plane_res_l,
13452 				hw_plane_wm->wm[level].plane_en,
13453 				hw_plane_wm->wm[level].plane_res_b,
13454 				hw_plane_wm->wm[level].plane_res_l);
13455 		}
13456 
13457 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13458 					 &sw_plane_wm->trans_wm)) {
13459 			drm_err(&dev_priv->drm,
13460 				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13461 				pipe_name(pipe), plane + 1,
13462 				sw_plane_wm->trans_wm.plane_en,
13463 				sw_plane_wm->trans_wm.plane_res_b,
13464 				sw_plane_wm->trans_wm.plane_res_l,
13465 				hw_plane_wm->trans_wm.plane_en,
13466 				hw_plane_wm->trans_wm.plane_res_b,
13467 				hw_plane_wm->trans_wm.plane_res_l);
13468 		}
13469 
13470 		/* DDB */
13471 		hw_ddb_entry = &hw->ddb_y[plane];
13472 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
13473 
13474 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13475 			drm_err(&dev_priv->drm,
13476 				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
13477 				pipe_name(pipe), plane + 1,
13478 				sw_ddb_entry->start, sw_ddb_entry->end,
13479 				hw_ddb_entry->start, hw_ddb_entry->end);
13480 		}
13481 	}
13482 
13483 	/*
13484 	 * cursor
13485 	 * If the cursor plane isn't active, we may not have updated it's ddb
13486 	 * allocation. In that case since the ddb allocation will be updated
13487 	 * once the plane becomes visible, we can skip this check
13488 	 */
13489 	if (1) {
13490 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13491 
13492 		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
13493 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
13494 
13495 		/* Watermarks */
13496 		for (level = 0; level <= max_level; level++) {
13497 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13498 						&sw_plane_wm->wm[level]) ||
13499 			    (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
13500 							       &sw_plane_wm->sagv_wm0)))
13501 				continue;
13502 
13503 			drm_err(&dev_priv->drm,
13504 				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13505 				pipe_name(pipe), level,
13506 				sw_plane_wm->wm[level].plane_en,
13507 				sw_plane_wm->wm[level].plane_res_b,
13508 				sw_plane_wm->wm[level].plane_res_l,
13509 				hw_plane_wm->wm[level].plane_en,
13510 				hw_plane_wm->wm[level].plane_res_b,
13511 				hw_plane_wm->wm[level].plane_res_l);
13512 		}
13513 
13514 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13515 					 &sw_plane_wm->trans_wm)) {
13516 			drm_err(&dev_priv->drm,
13517 				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13518 				pipe_name(pipe),
13519 				sw_plane_wm->trans_wm.plane_en,
13520 				sw_plane_wm->trans_wm.plane_res_b,
13521 				sw_plane_wm->trans_wm.plane_res_l,
13522 				hw_plane_wm->trans_wm.plane_en,
13523 				hw_plane_wm->trans_wm.plane_res_b,
13524 				hw_plane_wm->trans_wm.plane_res_l);
13525 		}
13526 
13527 		/* DDB */
13528 		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13529 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13530 
13531 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13532 			drm_err(&dev_priv->drm,
13533 				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13534 				pipe_name(pipe),
13535 				sw_ddb_entry->start, sw_ddb_entry->end,
13536 				hw_ddb_entry->start, hw_ddb_entry->end);
13537 		}
13538 	}
13539 
13540 	kfree(hw);
13541 }
13542 
13543 static void
13544 verify_connector_state(struct intel_atomic_state *state,
13545 		       struct intel_crtc *crtc)
13546 {
13547 	struct drm_connector *connector;
13548 	struct drm_connector_state *new_conn_state;
13549 	int i;
13550 
13551 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13552 		struct drm_encoder *encoder = connector->encoder;
13553 		struct intel_crtc_state *crtc_state = NULL;
13554 
13555 		if (new_conn_state->crtc != &crtc->base)
13556 			continue;
13557 
13558 		if (crtc)
13559 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13560 
13561 		intel_connector_verify_state(crtc_state, new_conn_state);
13562 
13563 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13564 		     "connector's atomic encoder doesn't match legacy encoder\n");
13565 	}
13566 }
13567 
/*
 * Check that every encoder's software and hardware state is consistent
 * with the connector states in @state: an encoder should be enabled iff
 * some connector's new state uses it as best_encoder, and an encoder with
 * no crtc must actually be off in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector referenced this encoder in its old
		 *        or new state (i.e. the encoder is part of @state).
		 * enabled: a connector's *new* state uses this encoder.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit aren't checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* A detached encoder must be off in hardware too. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
13616 
/*
 * Read the crtc state back from the hardware and compare it against the
 * just-committed sw state (@new_crtc_state), warning on any mismatch.
 *
 * NOTE: @old_crtc_state is destroyed, reset and reused as scratch storage
 * for the hw readout (aliased below as 'pipe_config'); callers must not
 * rely on its contents afterwards.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/* Recycle old_crtc_state as a pristine container for the hw readout,
	 * preserving only its ->uapi.state backpointer. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* For a bigjoiner slave, the encoders hang off the master crtc. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* A disabled crtc has nothing further to compare. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
13688 
13689 static void
13690 intel_verify_planes(struct intel_atomic_state *state)
13691 {
13692 	struct intel_plane *plane;
13693 	const struct intel_plane_state *plane_state;
13694 	int i;
13695 
13696 	for_each_new_intel_plane_in_state(state, plane,
13697 					  plane_state, i)
13698 		assert_plane(plane, plane_state->planar_slave ||
13699 			     plane_state->uapi.visible);
13700 }
13701 
/*
 * Verify the sw tracking of one shared DPLL against its hw state.
 *
 * With @crtc == NULL (e.g. from verify_disabled_dpll_state()) only the
 * pll-wide active-mask/refcount consistency is checked; otherwise the
 * crtc's membership in the pll's masks and the cached hw state are
 * verified as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls don't track on/off state, so skip those checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* An active crtc must be in the pll's active mask, and vice versa. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The cached sw hw_state must match what the hw reports while on. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13756 
13757 static void
13758 verify_shared_dpll_state(struct intel_crtc *crtc,
13759 			 struct intel_crtc_state *old_crtc_state,
13760 			 struct intel_crtc_state *new_crtc_state)
13761 {
13762 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13763 
13764 	if (new_crtc_state->shared_dpll)
13765 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13766 
13767 	if (old_crtc_state->shared_dpll &&
13768 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13769 		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13770 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13771 
13772 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13773 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
13774 				pipe_name(crtc->pipe));
13775 		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13776 				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
13777 				pipe_name(crtc->pipe));
13778 	}
13779 }
13780 
13781 static void
13782 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13783 			  struct intel_atomic_state *state,
13784 			  struct intel_crtc_state *old_crtc_state,
13785 			  struct intel_crtc_state *new_crtc_state)
13786 {
13787 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13788 		return;
13789 
13790 	verify_wm_state(crtc, new_crtc_state);
13791 	verify_connector_state(state, crtc);
13792 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13793 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13794 }
13795 
13796 static void
13797 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13798 {
13799 	int i;
13800 
13801 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
13802 		verify_single_dpll_state(dev_priv,
13803 					 &dev_priv->dpll.shared_dplls[i],
13804 					 NULL, NULL);
13805 }
13806 
/*
 * Verify the parts of the state that aren't tied to one enabled crtc:
 * all encoders, connector states not bound to a crtc (crtc == NULL),
 * and every shared DPLL.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13815 
/*
 * Refresh the crtc's vblank timestamping constants, cached mode flags and
 * scanline counter offset from @crtc_state's adjusted mode.  The big
 * comment below explains the per-platform scanline_offset values.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* An interlaced frame only scans half the lines per field. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
13870 
13871 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13872 {
13873 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13874 	struct intel_crtc_state *new_crtc_state;
13875 	struct intel_crtc *crtc;
13876 	int i;
13877 
13878 	if (!dev_priv->display.crtc_compute_clock)
13879 		return;
13880 
13881 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13882 		if (!intel_crtc_needs_modeset(new_crtc_state))
13883 			continue;
13884 
13885 		intel_release_shared_dplls(state, crtc);
13886 	}
13887 }
13888 
13889 /*
13890  * This implements the workaround described in the "notes" section of the mode
13891  * set sequence documentation. When going from no pipes or single pipe to
13892  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13893  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13894  */
13895 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
13896 {
13897 	struct intel_crtc_state *crtc_state;
13898 	struct intel_crtc *crtc;
13899 	struct intel_crtc_state *first_crtc_state = NULL;
13900 	struct intel_crtc_state *other_crtc_state = NULL;
13901 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13902 	int i;
13903 
13904 	/* look at all crtc's that are going to be enabled in during modeset */
13905 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13906 		if (!crtc_state->hw.active ||
13907 		    !intel_crtc_needs_modeset(crtc_state))
13908 			continue;
13909 
13910 		if (first_crtc_state) {
13911 			other_crtc_state = crtc_state;
13912 			break;
13913 		} else {
13914 			first_crtc_state = crtc_state;
13915 			first_pipe = crtc->pipe;
13916 		}
13917 	}
13918 
13919 	/* No workaround needed? */
13920 	if (!first_crtc_state)
13921 		return 0;
13922 
13923 	/* w/a possibly needed, check how many crtc's are already enabled. */
13924 	for_each_intel_crtc(state->base.dev, crtc) {
13925 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13926 		if (IS_ERR(crtc_state))
13927 			return PTR_ERR(crtc_state);
13928 
13929 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13930 
13931 		if (!crtc_state->hw.active ||
13932 		    intel_crtc_needs_modeset(crtc_state))
13933 			continue;
13934 
13935 		/* 2 or more enabled crtcs means no need for w/a */
13936 		if (enabled_pipe != INVALID_PIPE)
13937 			return 0;
13938 
13939 		enabled_pipe = crtc->pipe;
13940 	}
13941 
13942 	if (enabled_pipe != INVALID_PIPE)
13943 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13944 	else if (other_crtc_state)
13945 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13946 
13947 	return 0;
13948 }
13949 
13950 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
13951 			   u8 active_pipes)
13952 {
13953 	const struct intel_crtc_state *crtc_state;
13954 	struct intel_crtc *crtc;
13955 	int i;
13956 
13957 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13958 		if (crtc_state->hw.active)
13959 			active_pipes |= BIT(crtc->pipe);
13960 		else
13961 			active_pipes &= ~BIT(crtc->pipe);
13962 	}
13963 
13964 	return active_pipes;
13965 }
13966 
13967 static int intel_modeset_checks(struct intel_atomic_state *state)
13968 {
13969 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13970 
13971 	state->modeset = true;
13972 
13973 	if (IS_HASWELL(dev_priv))
13974 		return hsw_mode_set_planes_workaround(state);
13975 
13976 	return 0;
13977 }
13978 
13979 /*
13980  * Handle calculation of various watermark data at the end of the atomic check
13981  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13982  * handlers to ensure that all derived state has been updated.
13983  */
13984 static int calc_watermark_data(struct intel_atomic_state *state)
13985 {
13986 	struct drm_device *dev = state->base.dev;
13987 	struct drm_i915_private *dev_priv = to_i915(dev);
13988 
13989 	/* Is there platform-specific watermark information to calculate? */
13990 	if (dev_priv->display.compute_global_watermarks)
13991 		return dev_priv->display.compute_global_watermarks(state);
13992 
13993 	return 0;
13994 }
13995 
13996 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13997 				     struct intel_crtc_state *new_crtc_state)
13998 {
13999 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14000 		return;
14001 
14002 	new_crtc_state->uapi.mode_changed = false;
14003 	new_crtc_state->update_pipe = true;
14004 }
14005 
/*
 * Carry selected bits of the old crtc state over into the new one for
 * a fastset, so values the hardware is already running with are not
 * needlessly reprogrammed.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	/* keep has_drrs consistent with the preserved M/N state */
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
14022 
14023 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14024 					  struct intel_crtc *crtc,
14025 					  u8 plane_ids_mask)
14026 {
14027 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14028 	struct intel_plane *plane;
14029 
14030 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14031 		struct intel_plane_state *plane_state;
14032 
14033 		if ((plane_ids_mask & BIT(plane->id)) == 0)
14034 			continue;
14035 
14036 		plane_state = intel_atomic_get_plane_state(state, plane);
14037 		if (IS_ERR(plane_state))
14038 			return PTR_ERR(plane_state);
14039 	}
14040 
14041 	return 0;
14042 }
14043 
14044 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
14045 				     struct intel_crtc *crtc)
14046 {
14047 	const struct intel_crtc_state *old_crtc_state =
14048 		intel_atomic_get_old_crtc_state(state, crtc);
14049 	const struct intel_crtc_state *new_crtc_state =
14050 		intel_atomic_get_new_crtc_state(state, crtc);
14051 
14052 	return intel_crtc_add_planes_to_state(state, crtc,
14053 					      old_crtc_state->enabled_planes |
14054 					      new_crtc_state->enabled_planes);
14055 }
14056 
14057 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14058 {
14059 	/* See {hsw,vlv,ivb}_plane_ratio() */
14060 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14061 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14062 		IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
14063 }
14064 
14065 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
14066 					   struct intel_crtc *crtc,
14067 					   struct intel_crtc *other)
14068 {
14069 	const struct intel_plane_state *plane_state;
14070 	struct intel_plane *plane;
14071 	u8 plane_ids = 0;
14072 	int i;
14073 
14074 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
14075 		if (plane->pipe == crtc->pipe)
14076 			plane_ids |= BIT(plane->id);
14077 	}
14078 
14079 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
14080 }
14081 
14082 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
14083 {
14084 	const struct intel_crtc_state *crtc_state;
14085 	struct intel_crtc *crtc;
14086 	int i;
14087 
14088 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14089 		int ret;
14090 
14091 		if (!crtc_state->bigjoiner)
14092 			continue;
14093 
14094 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
14095 						      crtc_state->bigjoiner_linked_crtc);
14096 		if (ret)
14097 			return ret;
14098 	}
14099 
14100 	return 0;
14101 }
14102 
/*
 * Plane-related portion of the atomic check: pull in linked planes
 * (ICL NV12 Y/UV pairs, bigjoiner slaves), run the per-plane atomic
 * checks, and on platforms where the active plane count affects the
 * minimum cdclk, add the affected planes to the state so the cdclk
 * code sees them.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* the cursor doesn't count; see {hsw,vlv,ivb}_plane_ratio() */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
14165 
/*
 * Decide whether a full cdclk recomputation is required. Sets
 * *need_cdclk_calc to true (never clears it) if any plane's minimum
 * cdclk changed, the forced minimum cdclk changed, or the bandwidth
 * state demands a higher cdclk than any pipe currently requires.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* nothing further to compare without both cdclk and bw state */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		/* running maximum over the pipes' min cdclk */
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
14218 
14219 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14220 {
14221 	struct intel_crtc_state *crtc_state;
14222 	struct intel_crtc *crtc;
14223 	int i;
14224 
14225 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14226 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14227 		int ret;
14228 
14229 		ret = intel_crtc_atomic_check(state, crtc);
14230 		if (ret) {
14231 			drm_dbg_atomic(&i915->drm,
14232 				       "[CRTC:%d:%s] atomic driver check failed\n",
14233 				       crtc->base.base.id, crtc->base.name);
14234 			return ret;
14235 		}
14236 	}
14237 
14238 	return 0;
14239 }
14240 
14241 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14242 					       u8 transcoders)
14243 {
14244 	const struct intel_crtc_state *new_crtc_state;
14245 	struct intel_crtc *crtc;
14246 	int i;
14247 
14248 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14249 		if (new_crtc_state->hw.enable &&
14250 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14251 		    intel_crtc_needs_modeset(new_crtc_state))
14252 			return true;
14253 	}
14254 
14255 	return false;
14256 }
14257 
/*
 * Validate and establish the bigjoiner master/slave crtc linkage for
 * @crtc. When @new_crtc_state wants the bigjoiner, the next pipe is
 * claimed as slave and initialized from the master's state.
 *
 * Returns 0 on success, -EINVAL when the required slave crtc does not
 * exist or is already claimed, or another negative error code from
 * acquiring the slave crtc state (e.g. -EDEADLK).
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/* slave being enabled, is master is still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* the slave is always the numerically next pipe; it must exist */
	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	slave = new_crtc_state->bigjoiner_linked_crtc =
		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}
14309 
14310 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
14311 				 struct intel_crtc_state *master_crtc_state)
14312 {
14313 	struct intel_crtc_state *slave_crtc_state =
14314 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
14315 
14316 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
14317 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
14318 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
14319 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
14320 }
14321 
14322 /**
14323  * DOC: asynchronous flip implementation
14324  *
14325  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
14326  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
14327  * Correspondingly, support is currently added for primary plane only.
14328  *
14329  * Async flip can only change the plane surface address, so anything else
14330  * changing is rejected from the intel_atomic_check_async() function.
14331  * Once this check is cleared, flip done interrupt is enabled using
14332  * the skl_enable_flip_done() function.
14333  *
14334  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
14336  * handler itself. The timestamp and sequence sent during the flip done event
14337  * correspond to the last vblank and have no relation to the actual time when
14338  * the flip done event was sent.
14339  */
/*
 * Validate that @state describes a legal async flip: no modeset, crtc
 * active, unchanged active-plane set, and for each plane everything
 * except the surface address (modifier, format, stride, rotation,
 * size/coordinates, alpha/blend and color properties) unchanged.
 * Returns 0 if the async flip is acceptable, -EINVAL otherwise.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state)) {
			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
			return -EINVAL;
		}

		if (!new_crtc_state->hw.active) {
			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
			return -EINVAL;
		}
		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
			drm_dbg_kms(&i915->drm,
				    "Active planes cannot be changed during async flip\n");
			return -EINVAL;
		}
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/*
		 * TODO: Async flip is only supported through the page flip
		 * IOCTL as of now. So support is currently added for the
		 * primary plane only. Support for the other planes on
		 * platforms which support this (vlv/chv and icl+) should be
		 * added when async flip is enabled in the atomic IOCTL path.
		 */
		if (plane->id != PLANE_PRIMARY)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 and gen10 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->color_plane[0].stride !=
		    new_plane_state->color_plane[0].stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}
	}

	return 0;
}
14454 
/*
 * Pull every bigjoiner-linked crtc (with its connectors and planes)
 * into @state, force a modeset on the linked crtc when its partner
 * needs one, and finally tear down stale master/slave links so they
 * can be re-established by the compute phase.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* always acquire the linked crtc state, even without modeset */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* partner needs a modeset, so the linked crtc does too */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
14498 
14499 /**
14500  * intel_atomic_check - validate state object
14501  * @dev: drm device
14502  * @_state: state to validate
14503  */
14504 static int intel_atomic_check(struct drm_device *dev,
14505 			      struct drm_atomic_state *_state)
14506 {
14507 	struct drm_i915_private *dev_priv = to_i915(dev);
14508 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
14509 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14510 	struct intel_crtc *crtc;
14511 	int ret, i;
14512 	bool any_ms = false;
14513 
14514 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14515 					    new_crtc_state, i) {
14516 		if (new_crtc_state->inherited != old_crtc_state->inherited)
14517 			new_crtc_state->uapi.mode_changed = true;
14518 	}
14519 
14520 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
14521 	if (ret)
14522 		goto fail;
14523 
14524 	ret = intel_bigjoiner_add_affected_crtcs(state);
14525 	if (ret)
14526 		goto fail;
14527 
14528 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14529 					    new_crtc_state, i) {
14530 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
14531 			/* Light copy */
14532 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
14533 
14534 			continue;
14535 		}
14536 
14537 		if (!new_crtc_state->uapi.enable) {
14538 			if (!new_crtc_state->bigjoiner_slave) {
14539 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
14540 				any_ms = true;
14541 			}
14542 			continue;
14543 		}
14544 
14545 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
14546 		if (ret)
14547 			goto fail;
14548 
14549 		ret = intel_modeset_pipe_config(state, new_crtc_state);
14550 		if (ret)
14551 			goto fail;
14552 
14553 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
14554 						   new_crtc_state);
14555 		if (ret)
14556 			goto fail;
14557 	}
14558 
14559 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14560 					    new_crtc_state, i) {
14561 		if (!intel_crtc_needs_modeset(new_crtc_state))
14562 			continue;
14563 
14564 		ret = intel_modeset_pipe_config_late(new_crtc_state);
14565 		if (ret)
14566 			goto fail;
14567 
14568 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14569 	}
14570 
14571 	/**
14572 	 * Check if fastset is allowed by external dependencies like other
14573 	 * pipes and transcoders.
14574 	 *
14575 	 * Right now it only forces a fullmodeset when the MST master
14576 	 * transcoder did not changed but the pipe of the master transcoder
14577 	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14578 	 * in case of port synced crtcs, if one of the synced crtcs
14579 	 * needs a full modeset, all other synced crtcs should be
14580 	 * forced a full modeset.
14581 	 */
14582 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14583 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
14584 			continue;
14585 
14586 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14587 			enum transcoder master = new_crtc_state->mst_master_transcoder;
14588 
14589 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14590 				new_crtc_state->uapi.mode_changed = true;
14591 				new_crtc_state->update_pipe = false;
14592 			}
14593 		}
14594 
14595 		if (is_trans_port_sync_mode(new_crtc_state)) {
14596 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
14597 
14598 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14599 				trans |= BIT(new_crtc_state->master_transcoder);
14600 
14601 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
14602 				new_crtc_state->uapi.mode_changed = true;
14603 				new_crtc_state->update_pipe = false;
14604 			}
14605 		}
14606 
14607 		if (new_crtc_state->bigjoiner) {
14608 			struct intel_crtc_state *linked_crtc_state =
14609 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
14610 
14611 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
14612 				new_crtc_state->uapi.mode_changed = true;
14613 				new_crtc_state->update_pipe = false;
14614 			}
14615 		}
14616 	}
14617 
14618 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14619 					    new_crtc_state, i) {
14620 		if (intel_crtc_needs_modeset(new_crtc_state)) {
14621 			any_ms = true;
14622 			continue;
14623 		}
14624 
14625 		if (!new_crtc_state->update_pipe)
14626 			continue;
14627 
14628 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
14629 	}
14630 
14631 	if (any_ms && !check_digital_port_conflicts(state)) {
14632 		drm_dbg_kms(&dev_priv->drm,
14633 			    "rejecting conflicting digital port configuration\n");
14634 		ret = -EINVAL;
14635 		goto fail;
14636 	}
14637 
14638 	ret = drm_dp_mst_atomic_check(&state->base);
14639 	if (ret)
14640 		goto fail;
14641 
14642 	ret = intel_atomic_check_planes(state);
14643 	if (ret)
14644 		goto fail;
14645 
14646 	/*
14647 	 * distrust_bios_wm will force a full dbuf recomputation
14648 	 * but the hardware state will only get updated accordingly
14649 	 * if state->modeset==true. Hence distrust_bios_wm==true &&
14650 	 * state->modeset==false is an invalid combination which
14651 	 * would cause the hardware and software dbuf state to get
14652 	 * out of sync. We must prevent that.
14653 	 *
14654 	 * FIXME clean up this mess and introduce better
14655 	 * state tracking for dbuf.
14656 	 */
14657 	if (dev_priv->wm.distrust_bios_wm)
14658 		any_ms = true;
14659 
14660 	intel_fbc_choose_crtc(dev_priv, state);
14661 	ret = calc_watermark_data(state);
14662 	if (ret)
14663 		goto fail;
14664 
14665 	ret = intel_bw_atomic_check(state);
14666 	if (ret)
14667 		goto fail;
14668 
14669 	ret = intel_atomic_check_cdclk(state, &any_ms);
14670 	if (ret)
14671 		goto fail;
14672 
14673 	if (any_ms) {
14674 		ret = intel_modeset_checks(state);
14675 		if (ret)
14676 			goto fail;
14677 
14678 		ret = intel_modeset_calc_cdclk(state);
14679 		if (ret)
14680 			return ret;
14681 
14682 		intel_modeset_clear_plls(state);
14683 	}
14684 
14685 	ret = intel_atomic_check_crtcs(state);
14686 	if (ret)
14687 		goto fail;
14688 
14689 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14690 					    new_crtc_state, i) {
14691 		if (new_crtc_state->uapi.async_flip) {
14692 			ret = intel_atomic_check_async(state);
14693 			if (ret)
14694 				goto fail;
14695 		}
14696 
14697 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
14698 		    !new_crtc_state->update_pipe)
14699 			continue;
14700 
14701 		intel_dump_pipe_config(new_crtc_state, state,
14702 				       intel_crtc_needs_modeset(new_crtc_state) ?
14703 				       "[modeset]" : "[fastset]");
14704 	}
14705 
14706 	return 0;
14707 
14708  fail:
14709 	if (ret == -EDEADLK)
14710 		return ret;
14711 
14712 	/*
14713 	 * FIXME would probably be nice to know which crtc specifically
14714 	 * caused the failure, in cases where we can pinpoint it.
14715 	 */
14716 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14717 					    new_crtc_state, i)
14718 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14719 
14720 	return ret;
14721 }
14722 
14723 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14724 {
14725 	struct intel_crtc_state *crtc_state;
14726 	struct intel_crtc *crtc;
14727 	int i, ret;
14728 
14729 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
14730 	if (ret < 0)
14731 		return ret;
14732 
14733 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14734 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
14735 
14736 		if (mode_changed || crtc_state->update_pipe ||
14737 		    crtc_state->uapi.color_mgmt_changed) {
14738 			intel_dsb_prepare(crtc_state);
14739 		}
14740 	}
14741 
14742 	return 0;
14743 }
14744 
14745 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14746 {
14747 	struct drm_device *dev = crtc->base.dev;
14748 	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14749 
14750 	if (!vblank->max_vblank_count)
14751 		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14752 
14753 	return crtc->base.funcs->get_vblank_counter(&crtc->base);
14754 }
14755 
/*
 * Enable CPU (and, for PCH encoders, PCH) FIFO underrun reporting for
 * @crtc. On gen2 reporting is only armed when the crtc has active
 * planes — NOTE(review): presumably to avoid spurious underruns with
 * no planes enabled; confirm against gen2 errata.
 */
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

	if (crtc_state->has_pch_encoder) {
		enum pipe pch_transcoder =
			intel_crtc_pch_transcoder(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
	}
}
14771 
/*
 * Apply pipe-level state for a fastset (flip without full modeset):
 * pipe source size, panel fitter, linetime watermarks and (icl+) pipe
 * chicken bits. Called from commit_pipe_config() under vblank evasion.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ilk-style pfit: enable, or tear down if it was on before */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
14816 
/*
 * Commit pipe-level configuration for @crtc. For fastsets this covers
 * color management, scalers, PIPEMISC, pfit and PSR2 selective-update
 * programming; for modesets all of that was already done when the CRTC
 * was enabled. Watermarks are updated in both cases, last.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
14851 
14852 static void intel_enable_crtc(struct intel_atomic_state *state,
14853 			      struct intel_crtc *crtc)
14854 {
14855 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14856 	const struct intel_crtc_state *new_crtc_state =
14857 		intel_atomic_get_new_crtc_state(state, crtc);
14858 
14859 	if (!intel_crtc_needs_modeset(new_crtc_state))
14860 		return;
14861 
14862 	intel_crtc_update_active_timings(new_crtc_state);
14863 
14864 	dev_priv->display.crtc_enable(state, crtc);
14865 
14866 	if (new_crtc_state->bigjoiner_slave)
14867 		return;
14868 
14869 	/* vblanks work again, re-enable pipe CRC. */
14870 	intel_crtc_enable_pipe_crc(crtc);
14871 }
14872 
/*
 * Program @crtc for its new state: preloaded LUTs, encoder fastset
 * updates and FBC outside vblank evasion; pipe config and plane
 * programming inside it. Finally arms FIFO underrun reporting on the
 * first fastset of a BIOS-inherited configuration.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
14922 
/*
 * Fully disable a CRTC being turned off or modeset: planes first
 * (including any bigjoiner slave's planes), then pipe CRC, the pipe
 * itself, FBC and its shared DPLL. Must never be called on a
 * bigjoiner slave state directly - slaves are handled via the master.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
14962 
/*
 * Disable all CRTCs that are undergoing a modeset, honoring ordering
 * constraints: transcoder port sync / MST slaves must be disabled
 * before their masters, and bigjoiner slaves are torn down through
 * their master CRTC's state.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		/* remember which pipes are done so pass 2 skips them */
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
15015 
15016 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
15017 {
15018 	struct intel_crtc_state *new_crtc_state;
15019 	struct intel_crtc *crtc;
15020 	int i;
15021 
15022 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15023 		if (!new_crtc_state->hw.active)
15024 			continue;
15025 
15026 		intel_enable_crtc(state, crtc);
15027 		intel_update_crtc(state, crtc);
15028 	}
15029 }
15030 
/*
 * skl+ commit_modeset_enables hook. DDB (data buffer) allocations must
 * never overlap between active pipes at any instant, so pipes are
 * committed in an order that keeps each newly-programmed allocation
 * disjoint from everything still on the old allocation, waiting a
 * vblank where needed for a shrink to take effect.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* not yet safe: would overlap a not-yet-updated pipe */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that need a modeset and do not depend on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* all pipes must have been handled by the passes above */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
15152 
15153 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
15154 {
15155 	struct intel_atomic_state *state, *next;
15156 	struct llist_node *freed;
15157 
15158 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
15159 	llist_for_each_entry_safe(state, next, freed, freed)
15160 		drm_atomic_state_put(&state->base);
15161 }
15162 
15163 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15164 {
15165 	struct drm_i915_private *dev_priv =
15166 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15167 
15168 	intel_atomic_helper_free_state(dev_priv);
15169 }
15170 
/*
 * Block until the commit's dependency fence signals, or a GPU reset
 * that needs modeset intervention begins - whichever comes first.
 * Both waitqueues are armed before checking either condition so a
 * concurrent wakeup cannot be missed.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
15197 
15198 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
15199 {
15200 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15201 	struct intel_crtc *crtc;
15202 	int i;
15203 
15204 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15205 					    new_crtc_state, i)
15206 		intel_dsb_cleanup(old_crtc_state);
15207 }
15208 
15209 static void intel_atomic_cleanup_work(struct work_struct *work)
15210 {
15211 	struct intel_atomic_state *state =
15212 		container_of(work, struct intel_atomic_state, base.commit_work);
15213 	struct drm_i915_private *i915 = to_i915(state->base.dev);
15214 
15215 	intel_cleanup_dsbs(state);
15216 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
15217 	drm_atomic_helper_commit_cleanup_done(&state->base);
15218 	drm_atomic_state_put(&state->base);
15219 
15220 	intel_atomic_helper_free_state(i915);
15221 }
15222 
/*
 * Second half of an atomic commit: program the hardware for the
 * already-swapped new state. Runs inline for blocking commits or from
 * a worker for nonblocking ones. Waits for dependencies, disables
 * outgoing CRTCs, enables/updates the rest, then defers cleanup to a
 * separate worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* grab the power domains each changing crtc will need */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			skl_enable_flip_done(crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			skl_disable_flip_done(crtc);

		/* non-preloaded LUTs are loaded after the flip completed */
		if (new_crtc_state->hw.active &&
		    !intel_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
15398 
15399 static void intel_atomic_commit_work(struct work_struct *work)
15400 {
15401 	struct intel_atomic_state *state =
15402 		container_of(work, struct intel_atomic_state, base.commit_work);
15403 
15404 	intel_atomic_commit_tail(state);
15405 }
15406 
15407 static int __i915_sw_fence_call
15408 intel_atomic_commit_ready(struct i915_sw_fence *fence,
15409 			  enum i915_sw_fence_notify notify)
15410 {
15411 	struct intel_atomic_state *state =
15412 		container_of(fence, struct intel_atomic_state, commit_ready);
15413 
15414 	switch (notify) {
15415 	case FENCE_COMPLETE:
15416 		/* we do blocking waits in the worker, nothing to do here */
15417 		break;
15418 	case FENCE_FREE:
15419 		{
15420 			struct intel_atomic_helper *helper =
15421 				&to_i915(state->base.dev)->atomic_helper;
15422 
15423 			if (llist_add(&state->freed, &helper->free_list))
15424 				schedule_work(&helper->free_work);
15425 			break;
15426 		}
15427 	}
15428 
15429 	return NOTIFY_DONE;
15430 }
15431 
15432 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15433 {
15434 	struct intel_plane_state *old_plane_state, *new_plane_state;
15435 	struct intel_plane *plane;
15436 	int i;
15437 
15438 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15439 					     new_plane_state, i)
15440 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15441 					to_intel_frontbuffer(new_plane_state->hw.fb),
15442 					plane->frontbuffer_bit);
15443 }
15444 
/*
 * Driver atomic commit entry point.
 *
 * Takes a runtime PM reference for the duration of the commit,
 * prepares planes and DSBs, swaps in the new state and then either
 * runs the commit tail inline (blocking) or queues it on the
 * appropriate workqueue (nonblocking). On failure everything acquired
 * here is released and a negative error code is returned with the
 * state unswapped.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		/* undo prepare_commit: drop the fence and the DSBs */
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* blocking modesets must not overtake queued ones */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15536 
/*
 * Bookkeeping for a one-shot RPS boost armed on a CRTC's vblank
 * waitqueue; see add_rps_boost_after_vblank() and do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the vblank waitqueue */

	struct drm_crtc *crtc;		/* holds a vblank reference */
	struct i915_request *request;	/* request to boost; we own a ref */
};
15543 
15544 static int do_rps_boost(struct wait_queue_entry *_wait,
15545 			unsigned mode, int sync, void *key)
15546 {
15547 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
15548 	struct i915_request *rq = wait->request;
15549 
15550 	/*
15551 	 * If we missed the vblank, but the request is already running it
15552 	 * is reasonable to assume that it will complete before the next
15553 	 * vblank without our intervention, so leave RPS alone.
15554 	 */
15555 	if (!i915_request_started(rq))
15556 		intel_rps_boost(rq);
15557 	i915_request_put(rq);
15558 
15559 	drm_crtc_vblank_put(wait->crtc);
15560 
15561 	list_del(&wait->wait.entry);
15562 	kfree(wait);
15563 	return 1;
15564 }
15565 
15566 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15567 				       struct dma_fence *fence)
15568 {
15569 	struct wait_rps_boost *wait;
15570 
15571 	if (!dma_fence_is_i915(fence))
15572 		return;
15573 
15574 	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15575 		return;
15576 
15577 	if (drm_crtc_vblank_get(crtc))
15578 		return;
15579 
15580 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15581 	if (!wait) {
15582 		drm_crtc_vblank_put(crtc);
15583 		return;
15584 	}
15585 
15586 	wait->request = to_request(dma_fence_get(fence));
15587 	wait->crtc = crtc;
15588 
15589 	wait->wait.func = do_rps_boost;
15590 	wait->wait.flags = 0;
15591 
15592 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15593 }
15594 
15595 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15596 {
15597 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15598 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15599 	struct drm_framebuffer *fb = plane_state->hw.fb;
15600 	struct i915_vma *vma;
15601 
15602 	if (plane->id == PLANE_CURSOR &&
15603 	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15604 		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15605 		const int align = intel_cursor_alignment(dev_priv);
15606 		int err;
15607 
15608 		err = i915_gem_object_attach_phys(obj, align);
15609 		if (err)
15610 			return err;
15611 	}
15612 
15613 	vma = intel_pin_and_fence_fb_obj(fb,
15614 					 &plane_state->view,
15615 					 intel_plane_uses_fence(plane_state),
15616 					 &plane_state->flags);
15617 	if (IS_ERR(vma))
15618 		return PTR_ERR(vma);
15619 
15620 	plane_state->vma = vma;
15621 
15622 	return 0;
15623 }
15624 
15625 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15626 {
15627 	struct i915_vma *vma;
15628 
15629 	vma = fetch_and_zero(&old_plane_state->vma);
15630 	if (vma)
15631 		intel_unpin_fb_vma(vma, old_plane_state->flags);
15632 }
15633 
/*
 * Raise the scheduling priority of requests rendering into @obj so
 * the display flip is not stalled behind lower-priority GPU work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
15642 
15643 /**
15644  * intel_prepare_plane_fb - Prepare fb for usage on plane
15645  * @_plane: drm plane to prepare for
15646  * @_new_plane_state: the plane state being prepared
15647  *
15648  * Prepares a framebuffer for usage on a display plane.  Generally this
15649  * involves pinning the underlying object and updating the frontbuffer tracking
15650  * bits.  Some older platforms need special physical address handling for
15651  * cursor planes.
15652  *
15653  * Returns 0 on success, negative error code on failure.
15654  */
15655 int
15656 intel_prepare_plane_fb(struct drm_plane *_plane,
15657 		       struct drm_plane_state *_new_plane_state)
15658 {
15659 	struct intel_plane *plane = to_intel_plane(_plane);
15660 	struct intel_plane_state *new_plane_state =
15661 		to_intel_plane_state(_new_plane_state);
15662 	struct intel_atomic_state *state =
15663 		to_intel_atomic_state(new_plane_state->uapi.state);
15664 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15665 	const struct intel_plane_state *old_plane_state =
15666 		intel_atomic_get_old_plane_state(state, plane);
15667 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
15668 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
15669 	int ret;
15670 
15671 	if (old_obj) {
15672 		const struct intel_crtc_state *crtc_state =
15673 			intel_atomic_get_new_crtc_state(state,
15674 							to_intel_crtc(old_plane_state->hw.crtc));
15675 
15676 		/* Big Hammer, we also need to ensure that any pending
15677 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15678 		 * current scanout is retired before unpinning the old
15679 		 * framebuffer. Note that we rely on userspace rendering
15680 		 * into the buffer attached to the pipe they are waiting
15681 		 * on. If not, userspace generates a GPU hang with IPEHR
15682 		 * point to the MI_WAIT_FOR_EVENT.
15683 		 *
15684 		 * This should only fail upon a hung GPU, in which case we
15685 		 * can safely continue.
15686 		 */
15687 		if (intel_crtc_needs_modeset(crtc_state)) {
15688 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
15689 							      old_obj->base.resv, NULL,
15690 							      false, 0,
15691 							      GFP_KERNEL);
15692 			if (ret < 0)
15693 				return ret;
15694 		}
15695 	}
15696 
15697 	if (new_plane_state->uapi.fence) { /* explicit fencing */
15698 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
15699 						    new_plane_state->uapi.fence,
15700 						    i915_fence_timeout(dev_priv),
15701 						    GFP_KERNEL);
15702 		if (ret < 0)
15703 			return ret;
15704 	}
15705 
15706 	if (!obj)
15707 		return 0;
15708 
15709 	ret = i915_gem_object_pin_pages(obj);
15710 	if (ret)
15711 		return ret;
15712 
15713 	ret = intel_plane_pin_fb(new_plane_state);
15714 
15715 	i915_gem_object_unpin_pages(obj);
15716 	if (ret)
15717 		return ret;
15718 
15719 	fb_obj_bump_render_priority(obj);
15720 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
15721 
15722 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
15723 		struct dma_fence *fence;
15724 
15725 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
15726 						      obj->base.resv, NULL,
15727 						      false,
15728 						      i915_fence_timeout(dev_priv),
15729 						      GFP_KERNEL);
15730 		if (ret < 0)
15731 			goto unpin_fb;
15732 
15733 		fence = dma_resv_get_excl_rcu(obj->base.resv);
15734 		if (fence) {
15735 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15736 						   fence);
15737 			dma_fence_put(fence);
15738 		}
15739 	} else {
15740 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15741 					   new_plane_state->uapi.fence);
15742 	}
15743 
15744 	/*
15745 	 * We declare pageflips to be interactive and so merit a small bias
15746 	 * towards upclocking to deliver the frame on time. By only changing
15747 	 * the RPS thresholds to sample more regularly and aim for higher
15748 	 * clocks we can hopefully deliver low power workloads (like kodi)
15749 	 * that are not quite steady state without resorting to forcing
15750 	 * maximum clocks following a vblank miss (see do_rps_boost()).
15751 	 */
15752 	if (!state->rps_interactive) {
15753 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15754 		state->rps_interactive = true;
15755 	}
15756 
15757 	return 0;
15758 
15759 unpin_fb:
15760 	intel_plane_unpin_fb(new_plane_state);
15761 
15762 	return ret;
15763 }
15764 
15765 /**
15766  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15767  * @plane: drm plane to clean up for
15768  * @_old_plane_state: the state from the previous modeset
15769  *
15770  * Cleans up a framebuffer that has just been removed from a plane.
15771  */
15772 void
15773 intel_cleanup_plane_fb(struct drm_plane *plane,
15774 		       struct drm_plane_state *_old_plane_state)
15775 {
15776 	struct intel_plane_state *old_plane_state =
15777 		to_intel_plane_state(_old_plane_state);
15778 	struct intel_atomic_state *state =
15779 		to_intel_atomic_state(old_plane_state->uapi.state);
15780 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
15781 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
15782 
15783 	if (!obj)
15784 		return;
15785 
15786 	if (state->rps_interactive) {
15787 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15788 		state->rps_interactive = false;
15789 	}
15790 
15791 	/* Should only be called after a successful intel_prepare_plane_fb()! */
15792 	intel_plane_unpin_fb(old_plane_state);
15793 }
15794 
15795 /**
15796  * intel_plane_destroy - destroy a plane
15797  * @plane: plane to destroy
15798  *
15799  * Common destruction function for all types of planes (primary, cursor,
15800  * sprite).
15801  */
15802 void intel_plane_destroy(struct drm_plane *plane)
15803 {
15804 	drm_plane_cleanup(plane);
15805 	kfree(to_intel_plane(plane));
15806 }
15807 
/* ->late_register hook: create the per-crtc debugfs entries. */
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
	intel_crtc_debugfs_add(crtc);
	return 0;
}
15813 
/*
 * drm_crtc_funcs entries shared by every platform; the tables below
 * differ only in their vblank counter/enable/disable hooks, selected
 * per platform in intel_crtc_init().
 */
#define INTEL_CRTC_FUNCS \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources, \
	.late_register = intel_crtc_late_register

/* Gen8+ non-GMCH platforms. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Pre-gen8 non-GMCH (PCH split) platforms. */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* G4X, Valleyview and Cherryview. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Other gen4 platforms. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* I915GM and I945GM. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Other gen3 platforms. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Gen2. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
15887 
15888 static struct intel_crtc *intel_crtc_alloc(void)
15889 {
15890 	struct intel_crtc_state *crtc_state;
15891 	struct intel_crtc *crtc;
15892 
15893 	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
15894 	if (!crtc)
15895 		return ERR_PTR(-ENOMEM);
15896 
15897 	crtc_state = intel_crtc_state_alloc(crtc);
15898 	if (!crtc_state) {
15899 		kfree(crtc);
15900 		return ERR_PTR(-ENOMEM);
15901 	}
15902 
15903 	crtc->base.state = &crtc_state->uapi;
15904 	crtc->config = crtc_state;
15905 
15906 	return crtc;
15907 }
15908 
/* Undo intel_crtc_alloc(): free the crtc's state and the crtc itself. */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
15914 
/*
 * Fill in possible_crtcs for every plane: each plane is usable only on
 * the single crtc belonging to its pipe.
 */
static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  plane->pipe);

		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
	}
}
15926 
/*
 * Allocate and register the crtc for @pipe, create its primary, sprite
 * and cursor planes, and hook up the platform specific vblank functions.
 * Returns 0 on success, negative error code on failure.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	/* Pick the vblank hooks matching this platform/generation. */
	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe maps to exactly one crtc; record the mapping. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	if (INTEL_GEN(dev_priv) >= 10)
		drm_crtc_create_scaling_filter_property(&crtc->base,
						BIT(DRM_SCALING_FILTER_DEFAULT) |
						BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16021 
16022 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16023 				      struct drm_file *file)
16024 {
16025 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16026 	struct drm_crtc *drmmode_crtc;
16027 	struct intel_crtc *crtc;
16028 
16029 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16030 	if (!drmmode_crtc)
16031 		return -ENOENT;
16032 
16033 	crtc = to_intel_crtc(drmmode_crtc);
16034 	pipe_from_crtc_id->pipe = crtc->pipe;
16035 
16036 	return 0;
16037 }
16038 
16039 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16040 {
16041 	struct drm_device *dev = encoder->base.dev;
16042 	struct intel_encoder *source_encoder;
16043 	u32 possible_clones = 0;
16044 
16045 	for_each_intel_encoder(dev, source_encoder) {
16046 		if (encoders_cloneable(encoder, source_encoder))
16047 			possible_clones |= drm_encoder_mask(&source_encoder->base);
16048 	}
16049 
16050 	return possible_clones;
16051 }
16052 
16053 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16054 {
16055 	struct drm_device *dev = encoder->base.dev;
16056 	struct intel_crtc *crtc;
16057 	u32 possible_crtcs = 0;
16058 
16059 	for_each_intel_crtc(dev, crtc) {
16060 		if (encoder->pipe_mask & BIT(crtc->pipe))
16061 			possible_crtcs |= drm_crtc_mask(&crtc->base);
16062 	}
16063 
16064 	return possible_crtcs;
16065 }
16066 
16067 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16068 {
16069 	if (!IS_MOBILE(dev_priv))
16070 		return false;
16071 
16072 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16073 		return false;
16074 
16075 	if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16076 		return false;
16077 
16078 	return true;
16079 }
16080 
/*
 * Whether the DDI-based analog (CRT) output is present, based on the
 * platform generation, SKU, fuse straps and VBT.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	/* No DDI CRT from gen9 onwards. */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* ULT SKUs have no CRT. */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* Finally the VBT must declare integrated CRT support. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
16102 
/*
 * Write the unlock key into every PP_CONTROL register so that panel
 * power sequencer register writes take effect. Not needed on DDI
 * platforms.
 */
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));

		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
	}
}
16126 
/*
 * Select the panel power sequencer register block for this platform
 * and apply the PPS register unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16138 
/*
 * Probe and register all display outputs (DDI/DP/HDMI/LVDS/eDP/SDVO/
 * CRT/DSI/TV/DVO) appropriate for this platform, based on generation,
 * detection straps and VBT, then compute the possible_crtcs and
 * possible_clones masks for every registered encoder. The per-platform
 * registration order below is deliberate (see the inline comments) and
 * must not be reshuffled.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* All encoders are known now; fill in their crtc/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
16384 
16385 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16386 {
16387 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16388 
16389 	drm_framebuffer_cleanup(fb);
16390 	intel_frontbuffer_put(intel_fb->frontbuffer);
16391 
16392 	kfree(intel_fb);
16393 }
16394 
16395 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16396 						struct drm_file *file,
16397 						unsigned int *handle)
16398 {
16399 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16400 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
16401 
16402 	if (obj->userptr.mm) {
16403 		drm_dbg(&i915->drm,
16404 			"attempting to use a userptr for a framebuffer, denied\n");
16405 		return -EINVAL;
16406 	}
16407 
16408 	return drm_gem_handle_create(file, &obj->base, handle);
16409 }
16410 
/*
 * ->dirty hook: flush pending rendering for the fb's object and notify
 * frontbuffer tracking. The clip rectangles are accepted but ignored;
 * the whole fb is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}
16424 
/* Framebuffer vfuncs for all i915 user-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
16430 
/*
 * Validate @mode_cmd against hardware constraints (modifier vs object
 * tiling, pixel format, stride alignment, offsets) and initialize
 * @intel_fb around @obj. On failure the frontbuffer reference taken
 * here is dropped again. Returns 0 on success, negative error code on
 * failure.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's tiling state under its lock. */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* No modifier given: derive it from the object's tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks: single handle, stride alignment, CCS aux pitch. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
16576 
/*
 * ->fb_create hook for the DRM mode config: build an i915 framebuffer
 * from a userspace ADDFB(2) request. Returns the new framebuffer or an
 * ERR_PTR on failure.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	/*
	 * Work on a local copy so the framebuffer code can adjust the
	 * request without touching the caller-owned const structure.
	 */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	/* Resolve the GEM handle of the first plane to an object. */
	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	/*
	 * The lookup reference is dropped unconditionally; on success the
	 * framebuffer presumably holds its own reference on the object
	 * (taken inside intel_framebuffer_create() — not visible here).
	 */
	i915_gem_object_put(obj);

	return fb;
}
16595 
16596 static enum drm_mode_status
16597 intel_mode_valid(struct drm_device *dev,
16598 		 const struct drm_display_mode *mode)
16599 {
16600 	struct drm_i915_private *dev_priv = to_i915(dev);
16601 	int hdisplay_max, htotal_max;
16602 	int vdisplay_max, vtotal_max;
16603 
16604 	/*
16605 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
16606 	 * of DBLSCAN modes to the output's mode list when they detect
16607 	 * the scaling mode property on the connector. And they don't
16608 	 * ask the kernel to validate those modes in any way until
16609 	 * modeset time at which point the client gets a protocol error.
16610 	 * So in order to not upset those clients we silently ignore the
16611 	 * DBLSCAN flag on such connectors. For other connectors we will
16612 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
16613 	 * And we always reject DBLSCAN modes in connector->mode_valid()
16614 	 * as we never want such modes on the connector's mode list.
16615 	 */
16616 
16617 	if (mode->vscan > 1)
16618 		return MODE_NO_VSCAN;
16619 
16620 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
16621 		return MODE_H_ILLEGAL;
16622 
16623 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16624 			   DRM_MODE_FLAG_NCSYNC |
16625 			   DRM_MODE_FLAG_PCSYNC))
16626 		return MODE_HSYNC;
16627 
16628 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
16629 			   DRM_MODE_FLAG_PIXMUX |
16630 			   DRM_MODE_FLAG_CLKDIV2))
16631 		return MODE_BAD;
16632 
16633 	/* Transcoder timing limits */
16634 	if (INTEL_GEN(dev_priv) >= 11) {
16635 		hdisplay_max = 16384;
16636 		vdisplay_max = 8192;
16637 		htotal_max = 16384;
16638 		vtotal_max = 8192;
16639 	} else if (INTEL_GEN(dev_priv) >= 9 ||
16640 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16641 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16642 		vdisplay_max = 4096;
16643 		htotal_max = 8192;
16644 		vtotal_max = 8192;
16645 	} else if (INTEL_GEN(dev_priv) >= 3) {
16646 		hdisplay_max = 4096;
16647 		vdisplay_max = 4096;
16648 		htotal_max = 8192;
16649 		vtotal_max = 8192;
16650 	} else {
16651 		hdisplay_max = 2048;
16652 		vdisplay_max = 2048;
16653 		htotal_max = 4096;
16654 		vtotal_max = 4096;
16655 	}
16656 
16657 	if (mode->hdisplay > hdisplay_max ||
16658 	    mode->hsync_start > htotal_max ||
16659 	    mode->hsync_end > htotal_max ||
16660 	    mode->htotal > htotal_max)
16661 		return MODE_H_ILLEGAL;
16662 
16663 	if (mode->vdisplay > vdisplay_max ||
16664 	    mode->vsync_start > vtotal_max ||
16665 	    mode->vsync_end > vtotal_max ||
16666 	    mode->vtotal > vtotal_max)
16667 		return MODE_V_ILLEGAL;
16668 
16669 	if (INTEL_GEN(dev_priv) >= 5) {
16670 		if (mode->hdisplay < 64 ||
16671 		    mode->htotal - mode->hdisplay < 32)
16672 			return MODE_H_ILLEGAL;
16673 
16674 		if (mode->vtotal - mode->vdisplay < 5)
16675 			return MODE_V_ILLEGAL;
16676 	} else {
16677 		if (mode->htotal - mode->hdisplay < 32)
16678 			return MODE_H_ILLEGAL;
16679 
16680 		if (mode->vtotal - mode->vdisplay < 3)
16681 			return MODE_V_ILLEGAL;
16682 	}
16683 
16684 	return MODE_OK;
16685 }
16686 
16687 enum drm_mode_status
16688 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16689 				const struct drm_display_mode *mode,
16690 				bool bigjoiner)
16691 {
16692 	int plane_width_max, plane_height_max;
16693 
16694 	/*
16695 	 * intel_mode_valid() should be
16696 	 * sufficient on older platforms.
16697 	 */
16698 	if (INTEL_GEN(dev_priv) < 9)
16699 		return MODE_OK;
16700 
16701 	/*
16702 	 * Most people will probably want a fullscreen
16703 	 * plane so let's not advertize modes that are
16704 	 * too big for that.
16705 	 */
16706 	if (INTEL_GEN(dev_priv) >= 11) {
16707 		plane_width_max = 5120 << bigjoiner;
16708 		plane_height_max = 4320;
16709 	} else {
16710 		plane_width_max = 5120;
16711 		plane_height_max = 4096;
16712 	}
16713 
16714 	if (mode->hdisplay > plane_width_max)
16715 		return MODE_H_ILLEGAL;
16716 
16717 	if (mode->vdisplay > plane_height_max)
16718 		return MODE_V_ILLEGAL;
16719 
16720 	return MODE_OK;
16721 }
16722 
/* i915's implementation of the DRM core mode config hooks. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
16734 
16735 /**
16736  * intel_init_display_hooks - initialize the display modesetting hooks
16737  * @dev_priv: device private
16738  */
16739 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16740 {
16741 	intel_init_cdclk_hooks(dev_priv);
16742 
16743 	if (INTEL_GEN(dev_priv) >= 9) {
16744 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
16745 		dev_priv->display.get_initial_plane_config =
16746 			skl_get_initial_plane_config;
16747 		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
16748 		dev_priv->display.crtc_enable = hsw_crtc_enable;
16749 		dev_priv->display.crtc_disable = hsw_crtc_disable;
16750 	} else if (HAS_DDI(dev_priv)) {
16751 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
16752 		dev_priv->display.get_initial_plane_config =
16753 			i9xx_get_initial_plane_config;
16754 		dev_priv->display.crtc_compute_clock =
16755 			hsw_crtc_compute_clock;
16756 		dev_priv->display.crtc_enable = hsw_crtc_enable;
16757 		dev_priv->display.crtc_disable = hsw_crtc_disable;
16758 	} else if (HAS_PCH_SPLIT(dev_priv)) {
16759 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
16760 		dev_priv->display.get_initial_plane_config =
16761 			i9xx_get_initial_plane_config;
16762 		dev_priv->display.crtc_compute_clock =
16763 			ilk_crtc_compute_clock;
16764 		dev_priv->display.crtc_enable = ilk_crtc_enable;
16765 		dev_priv->display.crtc_disable = ilk_crtc_disable;
16766 	} else if (IS_CHERRYVIEW(dev_priv)) {
16767 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16768 		dev_priv->display.get_initial_plane_config =
16769 			i9xx_get_initial_plane_config;
16770 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16771 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
16772 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16773 	} else if (IS_VALLEYVIEW(dev_priv)) {
16774 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16775 		dev_priv->display.get_initial_plane_config =
16776 			i9xx_get_initial_plane_config;
16777 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16778 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
16779 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16780 	} else if (IS_G4X(dev_priv)) {
16781 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16782 		dev_priv->display.get_initial_plane_config =
16783 			i9xx_get_initial_plane_config;
16784 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16785 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16786 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16787 	} else if (IS_PINEVIEW(dev_priv)) {
16788 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16789 		dev_priv->display.get_initial_plane_config =
16790 			i9xx_get_initial_plane_config;
16791 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16792 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16793 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16794 	} else if (!IS_GEN(dev_priv, 2)) {
16795 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16796 		dev_priv->display.get_initial_plane_config =
16797 			i9xx_get_initial_plane_config;
16798 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16799 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16800 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16801 	} else {
16802 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16803 		dev_priv->display.get_initial_plane_config =
16804 			i9xx_get_initial_plane_config;
16805 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16806 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16807 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16808 	}
16809 
16810 	if (IS_GEN(dev_priv, 5)) {
16811 		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
16812 	} else if (IS_GEN(dev_priv, 6)) {
16813 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16814 	} else if (IS_IVYBRIDGE(dev_priv)) {
16815 		/* FIXME: detect B0+ stepping and use auto training */
16816 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16817 	}
16818 
16819 	if (INTEL_GEN(dev_priv) >= 9)
16820 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16821 	else
16822 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
16823 
16824 }
16825 
/*
 * Read out the current cdclk hardware config and seed the cdclk/dbuf
 * global atomic state objects from it.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(i915->dbuf.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	/* Start out with logical == actual == current hw state. */
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;

	dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
}
16839 
/*
 * Pull every crtc and plane into @state so the watermark sanitation
 * pass recomputes watermarks for the whole inherited configuration.
 * Returns 0 or a negative error code (including -EDEADLK for the
 * caller's acquire-ctx backoff loop).
 */
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		/* Adding the plane state is enough; no changes needed. */
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
16871 
16872 /*
16873  * Calculate what we think the watermarks should be for the state we've read
16874  * out of the hardware and then immediately program those watermarks so that
16875  * we ensure the hardware settings match our internal state.
16876  *
16877  * We can calculate what we think WM's should be by creating a duplicate of the
16878  * current state (which was constructed during hardware readout) and running it
16879  * through the atomic check code to calculate new watermark values in the
16880  * state object.
16881  */
16882 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
16883 {
16884 	struct drm_atomic_state *state;
16885 	struct intel_atomic_state *intel_state;
16886 	struct intel_crtc *crtc;
16887 	struct intel_crtc_state *crtc_state;
16888 	struct drm_modeset_acquire_ctx ctx;
16889 	int ret;
16890 	int i;
16891 
16892 	/* Only supported on platforms that use atomic watermark design */
16893 	if (!dev_priv->display.optimize_watermarks)
16894 		return;
16895 
16896 	state = drm_atomic_state_alloc(&dev_priv->drm);
16897 	if (drm_WARN_ON(&dev_priv->drm, !state))
16898 		return;
16899 
16900 	intel_state = to_intel_atomic_state(state);
16901 
16902 	drm_modeset_acquire_init(&ctx, 0);
16903 
16904 retry:
16905 	state->acquire_ctx = &ctx;
16906 
16907 	/*
16908 	 * Hardware readout is the only time we don't want to calculate
16909 	 * intermediate watermarks (since we don't trust the current
16910 	 * watermarks).
16911 	 */
16912 	if (!HAS_GMCH(dev_priv))
16913 		intel_state->skip_intermediate_wm = true;
16914 
16915 	ret = sanitize_watermarks_add_affected(state);
16916 	if (ret)
16917 		goto fail;
16918 
16919 	ret = intel_atomic_check(&dev_priv->drm, state);
16920 	if (ret)
16921 		goto fail;
16922 
16923 	/* Write calculated watermark values back */
16924 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
16925 		crtc_state->wm.need_postvbl_update = true;
16926 		dev_priv->display.optimize_watermarks(intel_state, crtc);
16927 
16928 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
16929 	}
16930 
16931 fail:
16932 	if (ret == -EDEADLK) {
16933 		drm_atomic_state_clear(state);
16934 		drm_modeset_backoff(&ctx);
16935 		goto retry;
16936 	}
16937 
16938 	/*
16939 	 * If we fail here, it means that the hardware appears to be
16940 	 * programmed in a way that shouldn't be possible, given our
16941 	 * understanding of watermark requirements.  This might mean a
16942 	 * mistake in the hardware readout code or a mistake in the
16943 	 * watermark calculations for a given platform.  Raise a WARN
16944 	 * so that this is noticeable.
16945 	 *
16946 	 * If this actually happens, we'll have to just leave the
16947 	 * BIOS-programmed watermarks untouched and hope for the best.
16948 	 */
16949 	drm_WARN(&dev_priv->drm, ret,
16950 		 "Could not determine valid watermarks for inherited state\n");
16951 
16952 	drm_atomic_state_put(state);
16953 
16954 	drm_modeset_drop_locks(&ctx);
16955 	drm_modeset_acquire_fini(&ctx);
16956 }
16957 
16958 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16959 {
16960 	if (IS_GEN(dev_priv, 5)) {
16961 		u32 fdi_pll_clk =
16962 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16963 
16964 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16965 	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16966 		dev_priv->fdi_pll_freq = 270000;
16967 	} else {
16968 		return;
16969 	}
16970 
16971 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16972 }
16973 
/*
 * Commit the state we read out from the hardware once at init time, so
 * that all derived plane/crtc state is computed before the first real
 * modeset from userspace. Returns 0 or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Pull in the connectors for any encoder whose
			 * fastset check says the inherited state can't be
			 * kept as-is.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Lock contention: drop the state and all locks, then retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
17052 
/*
 * Initialize the DRM mode config and fill in the i915 limits:
 * framebuffer and cursor sizes, supported features and the mode
 * config vfunc tables.
 */
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->global_obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->allow_fb_modifiers = true;

	mode_config->funcs = &intel_mode_funcs;

	/* Async flips only supported on gen9+. */
	if (INTEL_GEN(i915) >= 9)
		mode_config->async_page_flip = true;

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (INTEL_GEN(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (INTEL_GEN(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (IS_GEN(i915, 3)) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	/* Cursor size limits vary per platform family. */
	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}
17103 
/* Counterpart of intel_mode_config_init(). */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
17109 
/*
 * Release the fb/vma references held by an initial plane config after
 * the BIOS framebuffer takeover attempt is done.
 */
static void plane_config_fini(struct intel_initial_plane_config *plane_config)
{
	if (plane_config->fb) {
		struct drm_framebuffer *fb = &plane_config->fb->base;

		/* We may only have the stub and not a full framebuffer */
		if (drm_framebuffer_read_refcount(fb))
			drm_framebuffer_put(fb);
		else
			kfree(fb);
	}

	if (plane_config->vma)
		i915_vma_put(plane_config->vma);
}
17125 
/*
 * part #1: call before irq install
 *
 * Sets up everything display-related that must exist before interrupts
 * are enabled: vblank support, VBT parsing, the VGA client, power
 * domains, CSR/DMC firmware, the modeset workqueues, the mode config
 * and the cdclk/dbuf/bandwidth global atomic state.
 */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	/*
	 * NOTE(review): the workqueue allocations below are not checked
	 * for failure — presumably relying on later NULL checks or an
	 * oops; confirm whether error handling is needed here.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
17189 
/*
 * part #2: call after irq install, but before gem init
 *
 * Creates the crtcs and outputs, reads out the hardware state left by
 * the BIOS (including the BIOS framebuffers) and sanitizes the
 * watermarks to match that state.
 */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* One crtc per hardware pipe. */
	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out and sanitize the state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
17279 
/*
 * part #3: call after gem init
 *
 * Performs the initial commit of the readout state, sets up the
 * overlay and fbdev, and finally enables hotplug handling and IPC.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
17312 
/*
 * Force-enable @pipe with a fixed 640x480@60 timing and DPLL config
 * (used for the i830 "pipes must stay on" force quirk).
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: the dividers above must produce ~25.154 MHz. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Program the fixed 640x480 timings. */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe is actually running before returning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
17385 
/*
 * Counterpart of i830_enable_pipe(): turn off a pipe that was only kept
 * running because of the force quirk, then disable its DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes and cursors must already be off before the pipe goes. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Only kill the DPLL once the pipe has actually stopped. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
17415 
/*
 * On pre-gen4 hardware a primary plane may come up attached to the
 * wrong pipe (e.g. as left by the BIOS); detect that and disable any
 * such plane so the sw/hw state agree.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		/* Skip planes that aren't enabled at all. */
		if (!plane->get_hw_state(plane, &pipe))
			continue;

		/* Plane is attached to the pipe we expect: nothing to do. */
		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
17444 
/* Return true if any encoder is currently attached to @crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* The loop body executes once per attached encoder; bail on the first. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
17455 
/* Return the first connector attached to @encoder, or NULL if none. */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	/* The loop body executes once per attached connector; take the first. */
	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
17466 
17467 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
17468 			      enum pipe pch_transcoder)
17469 {
17470 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17471 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17472 }
17473 
/*
 * Reset the frame start delay to 0 on both the CPU and (if present)
 * PCH side, clearing whatever value the BIOS may have left behind.
 * The register holding the field differs per platform generation.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: the field lives in CHICKEN_TRANS. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms: the field lives in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: PCH-side field lives in PCH_TRANSCONF. */
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT+: PCH-side field lives in TRANS_CHICKEN2. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}
17524 
/*
 * Sanitize a single crtc's hardware state after readout: strip BIOS
 * leftovers (frame start delays, extra planes, background color), turn
 * the pipe off if no encoder is attached to it, and initialize the FIFO
 * underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
17591 
17592 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17593 {
17594 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
17595 
17596 	/*
17597 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
17598 	 * the hardware when a high res displays plugged in. DPLL P
17599 	 * divider is zero, and the pipe timings are bonkers. We'll
17600 	 * try to disable everything in that case.
17601 	 *
17602 	 * FIXME would be nice to be able to sanitize this state
17603 	 * without several WARNs, but for now let's take the easy
17604 	 * road.
17605 	 */
17606 	return IS_GEN(dev_priv, 6) &&
17607 		crtc_state->hw.active &&
17608 		crtc_state->shared_dpll &&
17609 		crtc_state->port_clock == 0;
17610 }
17611 
/*
 * Sanitize a single encoder's state after readout: if the encoder has
 * active connectors but no active pipe (e.g. fallout from resume
 * register restore, or a SNB BIOS DPLL misprogram), manually run the
 * encoder's disable hooks and clamp the connector/encoder links to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a pipe with bogus DPLL state (SNB BIOS bug) as inactive. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder we temporarily replaced */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
17682 
17683 /* FIXME read out full plane state for all planes */
17684 static void readout_plane_state(struct drm_i915_private *dev_priv)
17685 {
17686 	struct intel_plane *plane;
17687 	struct intel_crtc *crtc;
17688 
17689 	for_each_intel_plane(&dev_priv->drm, plane) {
17690 		struct intel_plane_state *plane_state =
17691 			to_intel_plane_state(plane->base.state);
17692 		struct intel_crtc_state *crtc_state;
17693 		enum pipe pipe = PIPE_A;
17694 		bool visible;
17695 
17696 		visible = plane->get_hw_state(plane, &pipe);
17697 
17698 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17699 		crtc_state = to_intel_crtc_state(crtc->base.state);
17700 
17701 		intel_set_plane_visible(crtc_state, plane_state, visible);
17702 
17703 		drm_dbg_kms(&dev_priv->drm,
17704 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
17705 			    plane->base.base.id, plane->base.name,
17706 			    enableddisabled(visible), pipe_name(pipe));
17707 	}
17708 
17709 	for_each_intel_crtc(&dev_priv->drm, crtc) {
17710 		struct intel_crtc_state *crtc_state =
17711 			to_intel_crtc_state(crtc->base.state);
17712 
17713 		fixup_plane_bitmasks(crtc_state);
17714 	}
17715 }
17716 
/*
 * Read the current modeset state out of the hardware (crtcs, planes,
 * DPLLs, encoders, connectors) and rebuild the software state from it,
 * discarding whatever software state existed before. Afterwards derive
 * the cdclk/bandwidth bookkeeping from the read-out state.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	/* Throw away the old crtc state and read the pipe config back. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	/* All three trackers must agree on the set of active pipes. */
	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	intel_dpll_readout_hw_state(dev_priv);

	/* Link each active encoder to its crtc and read its config. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);
			if (encoder->sync_state)
				encoder->sync_state(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* encoder should be linked to the bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	/* Link connectors to their encoders and update the crtc masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive cdclk/voltage/bandwidth bookkeeping from the new state. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		/* bigjoiner slaves get their state copied from the master below */
		if (crtc_state->bigjoiner_slave)
			continue;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide ||
				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);

		/* discard our incomplete slave state, copy it from master */
		if (crtc_state->bigjoiner && crtc_state->hw.active) {
			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
			struct intel_crtc_state *slave_crtc_state =
				to_intel_crtc_state(slave->base.state);

			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
			slave->base.mode = crtc->base.mode;

			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
			cdclk_state->min_voltage_level[slave->pipe] =
				crtc_state->min_voltage_level;

			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
				const struct intel_plane_state *plane_state =
					to_intel_plane_state(plane->base.state);

				/*
				 * FIXME don't have the fb yet, so can't
				 * use intel_plane_data_rate() :(
				 *
				 * NOTE(review): this iterates the *slave's*
				 * planes but writes into the master's
				 * crtc_state->data_rate[]; presumably
				 * slave_crtc_state was intended — confirm
				 * against upstream before changing.
				 */
				if (plane_state->uapi.visible)
					crtc_state->data_rate[plane->id] =
						4 * crtc_state->pixel_rate;
				else
					crtc_state->data_rate[plane->id] = 0;
			}

			intel_bw_crtc_update(bw_state, slave_crtc_state);
			drm_calc_timestamping_constants(&slave->base,
							&slave_crtc_state->hw.adjusted_mode);
		}
	}
}
17938 
17939 static void
17940 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17941 {
17942 	struct intel_encoder *encoder;
17943 
17944 	for_each_intel_encoder(&dev_priv->drm, encoder) {
17945 		struct intel_crtc_state *crtc_state;
17946 
17947 		if (!encoder->get_power_domains)
17948 			continue;
17949 
17950 		/*
17951 		 * MST-primary and inactive encoders don't have a crtc state
17952 		 * and neither of these require any power domain references.
17953 		 */
17954 		if (!encoder->base.crtc)
17955 			continue;
17956 
17957 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17958 		encoder->get_power_domains(encoder, crtc_state);
17959 	}
17960 }
17961 
/*
 * Apply display workarounds that must be in place before we start
 * touching planes/pipes during hw state takeover.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
17990 
17991 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17992 				       enum port port, i915_reg_t hdmi_reg)
17993 {
17994 	u32 val = intel_de_read(dev_priv, hdmi_reg);
17995 
17996 	if (val & SDVO_ENABLE ||
17997 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17998 		return;
17999 
18000 	drm_dbg_kms(&dev_priv->drm,
18001 		    "Sanitizing transcoder select for HDMI %c\n",
18002 		    port_name(port));
18003 
18004 	val &= ~SDVO_PIPE_SEL_MASK;
18005 	val |= SDVO_PIPE_SEL(PIPE_A);
18006 
18007 	intel_de_write(dev_priv, hdmi_reg, val);
18008 }
18009 
18010 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
18011 				     enum port port, i915_reg_t dp_reg)
18012 {
18013 	u32 val = intel_de_read(dev_priv, dp_reg);
18014 
18015 	if (val & DP_PORT_EN ||
18016 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
18017 		return;
18018 
18019 	drm_dbg_kms(&dev_priv->drm,
18020 		    "Sanitizing transcoder select for DP %c\n",
18021 		    port_name(port));
18022 
18023 	val &= ~DP_PIPE_SEL_MASK;
18024 	val |= DP_PIPE_SEL(PIPE_A);
18025 
18026 	intel_de_write(dev_priv, dp_reg, val);
18027 }
18028 
18029 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
18030 {
18031 	/*
18032 	 * The BIOS may select transcoder B on some of the PCH
18033 	 * ports even it doesn't enable the port. This would trip
18034 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
18035 	 * Sanitize the transcoder select bits to prevent that. We
18036 	 * assume that the BIOS never actually enabled the port,
18037 	 * because if it did we'd actually have to toggle the port
18038 	 * on and back off to make the transcoder A select stick
18039 	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
18040 	 * intel_disable_sdvo()).
18041 	 */
18042 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
18043 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
18044 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
18045 
18046 	/* PCH SDVOB multiplex with HDMIB */
18047 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
18048 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
18049 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
18050 }
18051 
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Keep the init power domain held for the whole readout/sanitize. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	/* Sanitize crtcs last since they may disable pipes entirely. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and, pre-gen9, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* No crtc should still hold power domain references at this point. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18141 
/*
 * Restore the display state saved at suspend time (if any) by replaying
 * it through the atomic helpers, taking all modeset locks with the
 * standard deadlock-backoff dance.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Consume the saved state; it is replayed at most once. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry lock acquisition until we stop hitting ww deadlocks. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
18176 
18177 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
18178 {
18179 	struct intel_connector *connector;
18180 	struct drm_connector_list_iter conn_iter;
18181 
18182 	/* Kill all the work that may have been queued by hpd. */
18183 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
18184 	for_each_intel_connector_iter(connector, &conn_iter) {
18185 		if (connector->modeset_retry_work.func)
18186 			cancel_work_sync(&connector->modeset_retry_work);
18187 		if (connector->hdcp.shim) {
18188 			cancel_delayed_work_sync(&connector->hdcp.check_work);
18189 			cancel_work_sync(&connector->hdcp.prop_work);
18190 		}
18191 	}
18192 	drm_connector_list_iter_end(&conn_iter);
18193 }
18194 
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Drain any commits still in flight before tearing anything down. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* All freeing via the atomic helper must be complete by now. */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
18204 
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	/* tears down all remaining crtcs/encoders/connectors */
	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* safe now: everything that could queue work has been torn down */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18244 
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	/* release the display power domain infrastructure */
	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
18256 
18257 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18258 
18259 struct intel_display_error_state {
18260 
18261 	u32 power_well_driver;
18262 
18263 	struct intel_cursor_error_state {
18264 		u32 control;
18265 		u32 position;
18266 		u32 base;
18267 		u32 size;
18268 	} cursor[I915_MAX_PIPES];
18269 
18270 	struct intel_pipe_error_state {
18271 		bool power_domain_on;
18272 		u32 source;
18273 		u32 stat;
18274 	} pipe[I915_MAX_PIPES];
18275 
18276 	struct intel_plane_error_state {
18277 		u32 control;
18278 		u32 stride;
18279 		u32 size;
18280 		u32 pos;
18281 		u32 addr;
18282 		u32 surface;
18283 		u32 tile_offset;
18284 	} plane[I915_MAX_PIPES];
18285 
18286 	struct intel_transcoder_error_state {
18287 		bool available;
18288 		bool power_domain_on;
18289 		enum transcoder cpu_transcoder;
18290 
18291 		u32 conf;
18292 
18293 		u32 htotal;
18294 		u32 hblank;
18295 		u32 hsync;
18296 		u32 vtotal;
18297 		u32 vblank;
18298 		u32 vsync;
18299 	} transcoder[5];
18300 };
18301 
18302 struct intel_display_error_state *
18303 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
18304 {
18305 	struct intel_display_error_state *error;
18306 	int transcoders[] = {
18307 		TRANSCODER_A,
18308 		TRANSCODER_B,
18309 		TRANSCODER_C,
18310 		TRANSCODER_D,
18311 		TRANSCODER_EDP,
18312 	};
18313 	int i;
18314 
18315 	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
18316 
18317 	if (!HAS_DISPLAY(dev_priv))
18318 		return NULL;
18319 
18320 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
18321 	if (error == NULL)
18322 		return NULL;
18323 
18324 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18325 		error->power_well_driver = intel_de_read(dev_priv,
18326 							 HSW_PWR_WELL_CTL2);
18327 
18328 	for_each_pipe(dev_priv, i) {
18329 		error->pipe[i].power_domain_on =
18330 			__intel_display_power_is_enabled(dev_priv,
18331 							 POWER_DOMAIN_PIPE(i));
18332 		if (!error->pipe[i].power_domain_on)
18333 			continue;
18334 
18335 		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
18336 		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
18337 		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
18338 
18339 		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
18340 		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
18341 		if (INTEL_GEN(dev_priv) <= 3) {
18342 			error->plane[i].size = intel_de_read(dev_priv,
18343 							     DSPSIZE(i));
18344 			error->plane[i].pos = intel_de_read(dev_priv,
18345 							    DSPPOS(i));
18346 		}
18347 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18348 			error->plane[i].addr = intel_de_read(dev_priv,
18349 							     DSPADDR(i));
18350 		if (INTEL_GEN(dev_priv) >= 4) {
18351 			error->plane[i].surface = intel_de_read(dev_priv,
18352 								DSPSURF(i));
18353 			error->plane[i].tile_offset = intel_de_read(dev_priv,
18354 								    DSPTILEOFF(i));
18355 		}
18356 
18357 		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
18358 
18359 		if (HAS_GMCH(dev_priv))
18360 			error->pipe[i].stat = intel_de_read(dev_priv,
18361 							    PIPESTAT(i));
18362 	}
18363 
18364 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18365 		enum transcoder cpu_transcoder = transcoders[i];
18366 
18367 		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
18368 			continue;
18369 
18370 		error->transcoder[i].available = true;
18371 		error->transcoder[i].power_domain_on =
18372 			__intel_display_power_is_enabled(dev_priv,
18373 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18374 		if (!error->transcoder[i].power_domain_on)
18375 			continue;
18376 
18377 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
18378 
18379 		error->transcoder[i].conf = intel_de_read(dev_priv,
18380 							  PIPECONF(cpu_transcoder));
18381 		error->transcoder[i].htotal = intel_de_read(dev_priv,
18382 							    HTOTAL(cpu_transcoder));
18383 		error->transcoder[i].hblank = intel_de_read(dev_priv,
18384 							    HBLANK(cpu_transcoder));
18385 		error->transcoder[i].hsync = intel_de_read(dev_priv,
18386 							   HSYNC(cpu_transcoder));
18387 		error->transcoder[i].vtotal = intel_de_read(dev_priv,
18388 							    VTOTAL(cpu_transcoder));
18389 		error->transcoder[i].vblank = intel_de_read(dev_priv,
18390 							    VBLANK(cpu_transcoder));
18391 		error->transcoder[i].vsync = intel_de_read(dev_priv,
18392 							   VSYNC(cpu_transcoder));
18393 	}
18394 
18395 	return error;
18396 }
18397 
18398 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18399 
/*
 * Dump a previously captured display error state snapshot into an
 * error state buffer. Safe to call with error == NULL (no-op).
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* the gen checks mirror which fields were captured */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* skip transcoders that don't exist on this platform */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
18458 
18459 #endif
18460