/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

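	/*
	 * The CCK divider gives rate = 2 * ref / (divider + 1). As a worked
	 * example (values purely illustrative): with a 1600000 kHz HPLL
	 * reference and divider == 7, we get 2 * 1600000 / 8 = 400000 kHz.
	 */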
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate the clock using (register_value + 2) for N/M1/M2, so the
 * range values below are (actual_value - 2).
 */
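/*
 * For example (purely illustrative): the m1 range of 12..22 in
 * intel_limits_ironlake_dac below corresponds to actual M1 divider
 * values of 14..24.
 */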
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->base);
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
		crtc_state->sync_mode_slaves_mask);
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
		crtc_state->sync_mode_slaves_mask);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

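/*
 * Worked example of the equation below (illustrative numbers, not tied to
 * any particular platform): with refclk = 96000 kHz, n = 3, m1 = 12,
 * m2 = 7, p1 = 2, p2 = 10 we get m = 5 * (12 + 2) + (7 + 2) = 79,
 * vco = 96000 * 79 / (3 + 2) = 1516800 kHz, and
 * dot = 1516800 / (2 * 10) = 75840 kHz.
 */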
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

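/*
 * Note: on CHV, m2 is stored in .22 binary fixed point (see the << 22 in
 * intel_limits_chv), which is why the divisor below is n << 22 and why the
 * intermediate multiplication is done in 64 bits.
 */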
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the
 * P divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

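	/* err started out equal to target, so any improvement means a hit */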
	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the
 * P divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match the
 * P divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check whether the calculated PLL configuration is better than the best
 * configuration and error found so far. The calculated error is reported
 * via @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
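	/*
	 * For example (illustrative numbers): missing a 270000 kHz target
	 * by 27 kHz gives 1000000 * 27 / 270000 = 100 ppm.
	 */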
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

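					/*
					 * Invert dot = refclk * m1 * m2 / (n * p)
					 * (see vlv_calc_dpll_params()) to pick
					 * the m2 that hits the target fast clock.
					 */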
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1, and m1 is always
	 * set to 2.  If we ever need to support a 200 MHz refclk, we will
	 * have to revisit this because n may no longer be 1 then.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

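			/*
			 * As in vlv_find_best_dpll(), solve for m2, except
			 * CHV keeps m2 in .22 fixed point, hence the << 22.
			 */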
			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between setting the dclkp clock enable bit
	 * and enabling the PLL.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* i830 always keeps both pipes enabled, so never disable its DPLLs */
1561 	if (IS_I830(dev_priv))
1562 		return;
1563 
1564 	/* Make sure the pipe isn't still relying on us */
1565 	assert_pipe_disabled(dev_priv, pipe);
1566 
1567 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1568 	POSTING_READ(DPLL(pipe));
1569 }
1570 
1571 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1572 {
1573 	u32 val;
1574 
1575 	/* Make sure the pipe isn't still relying on us */
1576 	assert_pipe_disabled(dev_priv, pipe);
1577 
1578 	val = DPLL_INTEGRATED_REF_CLK_VLV |
1579 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1580 	if (pipe != PIPE_A)
1581 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1582 
1583 	I915_WRITE(DPLL(pipe), val);
1584 	POSTING_READ(DPLL(pipe));
1585 }
1586 
1587 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1588 {
1589 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1590 	u32 val;
1591 
1592 	/* Make sure the pipe isn't still relying on us */
1593 	assert_pipe_disabled(dev_priv, pipe);
1594 
1595 	val = DPLL_SSC_REF_CLK_CHV |
1596 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1597 	if (pipe != PIPE_A)
1598 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1599 
1600 	I915_WRITE(DPLL(pipe), val);
1601 	POSTING_READ(DPLL(pipe));
1602 
1603 	vlv_dpio_get(dev_priv);
1604 
1605 	/* Disable 10bit clock to display controller */
1606 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1607 	val &= ~DPIO_DCLKP_EN;
1608 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1609 
1610 	vlv_dpio_put(dev_priv);
1611 }
1612 
1613 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1614 			 struct intel_digital_port *dport,
1615 			 unsigned int expected_mask)
1616 {
1617 	u32 port_mask;
1618 	i915_reg_t dpll_reg;
1619 
1620 	switch (dport->base.port) {
1621 	case PORT_B:
1622 		port_mask = DPLL_PORTB_READY_MASK;
1623 		dpll_reg = DPLL(0);
1624 		break;
1625 	case PORT_C:
1626 		port_mask = DPLL_PORTC_READY_MASK;
1627 		dpll_reg = DPLL(0);
1628 		expected_mask <<= 4;
1629 		break;
1630 	case PORT_D:
1631 		port_mask = DPLL_PORTD_READY_MASK;
1632 		dpll_reg = DPIO_PHY_STATUS;
1633 		break;
1634 	default:
1635 		BUG();
1636 	}
1637 
1638 	if (intel_de_wait_for_register(dev_priv, dpll_reg,
1639 				       port_mask, expected_mask, 1000))
1640 		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
1641 		     dport->base.base.base.id, dport->base.base.name,
1642 		     I915_READ(dpll_reg) & port_mask, expected_mask);
1643 }
1644 
1645 static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
1646 {
1647 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1648 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1649 	enum pipe pipe = crtc->pipe;
1650 	i915_reg_t reg;
1651 	u32 val, pipeconf_val;
1652 
1653 	/* Make sure PCH DPLL is enabled */
1654 	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
1655 
1656 	/* FDI must be feeding us bits for PCH ports */
1657 	assert_fdi_tx_enabled(dev_priv, pipe);
1658 	assert_fdi_rx_enabled(dev_priv, pipe);
1659 
1660 	if (HAS_PCH_CPT(dev_priv)) {
1661 		/* Workaround: Set the timing override bit before enabling the
1662 		 * pch transcoder. */
1663 		reg = TRANS_CHICKEN2(pipe);
1664 		val = I915_READ(reg);
1665 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1666 		I915_WRITE(reg, val);
1667 	}
1668 
1669 	reg = PCH_TRANSCONF(pipe);
1670 	val = I915_READ(reg);
1671 	pipeconf_val = I915_READ(PIPECONF(pipe));
1672 
1673 	if (HAS_PCH_IBX(dev_priv)) {
1674 		/*
1675 		 * Make the BPC in transcoder be consistent with
1676 		 * that in pipeconf reg. For HDMI we must use 8bpc
1677 		 * here for both 8bpc and 12bpc.
1678 		 */
1679 		val &= ~PIPECONF_BPC_MASK;
1680 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1681 			val |= PIPECONF_8BPC;
1682 		else
1683 			val |= pipeconf_val & PIPECONF_BPC_MASK;
1684 	}
1685 
1686 	val &= ~TRANS_INTERLACE_MASK;
1687 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1688 		if (HAS_PCH_IBX(dev_priv) &&
1689 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
1690 			val |= TRANS_LEGACY_INTERLACED_ILK;
1691 		else
1692 			val |= TRANS_INTERLACED;
1693 	} else {
1694 		val |= TRANS_PROGRESSIVE;
1695 	}
1696 
1697 	I915_WRITE(reg, val | TRANS_ENABLE);
1698 	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
1699 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1700 }
1701 
1702 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1703 				      enum transcoder cpu_transcoder)
1704 {
1705 	u32 val, pipeconf_val;
1706 
1707 	/* FDI must be feeding us bits for PCH ports */
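	/* transcoders A..C share the numeric values of pipes A..C, hence the cast */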
1708 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1709 	assert_fdi_rx_enabled(dev_priv, PIPE_A);
1710 
1711 	/* Workaround: set timing override bit. */
1712 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1713 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1714 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1715 
1716 	val = TRANS_ENABLE;
1717 	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1718 
1719 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1720 	    PIPECONF_INTERLACED_ILK)
1721 		val |= TRANS_INTERLACED;
1722 	else
1723 		val |= TRANS_PROGRESSIVE;
1724 
1725 	I915_WRITE(LPT_TRANSCONF, val);
1726 	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
1727 				  TRANS_STATE_ENABLE, 100))
1728 		DRM_ERROR("Failed to enable PCH transcoder\n");
1729 }
1730 
1731 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1732 					    enum pipe pipe)
1733 {
1734 	i915_reg_t reg;
1735 	u32 val;
1736 
1737 	/* FDI relies on the transcoder */
1738 	assert_fdi_tx_disabled(dev_priv, pipe);
1739 	assert_fdi_rx_disabled(dev_priv, pipe);
1740 
1741 	/* Ports must be off as well */
1742 	assert_pch_ports_disabled(dev_priv, pipe);
1743 
1744 	reg = PCH_TRANSCONF(pipe);
1745 	val = I915_READ(reg);
1746 	val &= ~TRANS_ENABLE;
1747 	I915_WRITE(reg, val);
	/* wait for the PCH transcoder to turn off (TRANS_STATE_ENABLE to clear) */
1749 	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
1750 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1751 
1752 	if (HAS_PCH_CPT(dev_priv)) {
1753 		/* Workaround: Clear the timing override chicken bit again. */
1754 		reg = TRANS_CHICKEN2(pipe);
1755 		val = I915_READ(reg);
1756 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1757 		I915_WRITE(reg, val);
1758 	}
1759 }
1760 
1761 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1762 {
1763 	u32 val;
1764 
1765 	val = I915_READ(LPT_TRANSCONF);
1766 	val &= ~TRANS_ENABLE;
1767 	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for the PCH transcoder to turn off (TRANS_STATE_ENABLE to clear) */
1769 	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
1770 				    TRANS_STATE_ENABLE, 50))
1771 		DRM_ERROR("Failed to disable PCH transcoder\n");
1772 
1773 	/* Workaround: clear timing override bit. */
1774 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1775 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1776 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1777 }
1778 
1779 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1780 {
1781 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1782 
1783 	if (HAS_PCH_LPT(dev_priv))
1784 		return PIPE_A;
1785 	else
1786 		return crtc->pipe;
1787 }
1788 
1789 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1790 {
1791 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1792 
1793 	/*
1794 	 * On i965gm the hardware frame counter reads
1795 	 * zero when the TV encoder is enabled :(
1796 	 */
1797 	if (IS_I965GM(dev_priv) &&
1798 	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1799 		return 0;
1800 
1801 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1802 		return 0xffffffff; /* full 32 bit counter */
1803 	else if (INTEL_GEN(dev_priv) >= 3)
1804 		return 0xffffff; /* only 24 bits of frame count */
1805 	else
1806 		return 0; /* Gen2 doesn't have a hardware frame counter */
1807 }
1808 
1809 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1810 {
1811 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1812 
1813 	drm_crtc_set_max_vblank_count(&crtc->base,
1814 				      intel_crtc_max_vblank_count(crtc_state));
1815 	drm_crtc_vblank_on(&crtc->base);
1816 }
1817 
1818 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1819 {
1820 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1821 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1822 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1823 	enum pipe pipe = crtc->pipe;
1824 	i915_reg_t reg;
1825 	u32 val;
1826 
1827 	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1828 
1829 	assert_planes_disabled(crtc);
1830 
1831 	/*
1832 	 * A pipe without a PLL won't actually be able to drive bits from
1833 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1834 	 * need the check.
1835 	 */
1836 	if (HAS_GMCH(dev_priv)) {
1837 		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1838 			assert_dsi_pll_enabled(dev_priv);
1839 		else
1840 			assert_pll_enabled(dev_priv, pipe);
1841 	} else {
1842 		if (new_crtc_state->has_pch_encoder) {
1843 			/* if driving the PCH, we need FDI enabled */
1844 			assert_fdi_rx_pll_enabled(dev_priv,
1845 						  intel_crtc_pch_transcoder(crtc));
1846 			assert_fdi_tx_pll_enabled(dev_priv,
1847 						  (enum pipe) cpu_transcoder);
1848 		}
1849 		/* FIXME: assert CPU port conditions for SNB+ */
1850 	}
1851 
1852 	trace_intel_pipe_enable(crtc);
1853 
1854 	reg = PIPECONF(cpu_transcoder);
1855 	val = I915_READ(reg);
1856 	if (val & PIPECONF_ENABLE) {
1857 		/* we keep both pipes enabled on 830 */
1858 		WARN_ON(!IS_I830(dev_priv));
1859 		return;
1860 	}
1861 
1862 	I915_WRITE(reg, val | PIPECONF_ENABLE);
1863 	POSTING_READ(reg);
1864 
1865 	/*
	 * Until the pipe starts, PIPEDSL reads will return a stale value,
1867 	 * which causes an apparent vblank timestamp jump when PIPEDSL
1868 	 * resets to its proper value. That also messes up the frame count
1869 	 * when it's derived from the timestamps. So let's wait for the
1870 	 * pipe to start properly before we call drm_crtc_vblank_on()
1871 	 */
1872 	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1873 		intel_wait_for_pipe_scanline_moving(crtc);
1874 }
1875 
1876 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1877 {
1878 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1879 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1880 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1881 	enum pipe pipe = crtc->pipe;
1882 	i915_reg_t reg;
1883 	u32 val;
1884 
1885 	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1886 
1887 	/*
1888 	 * Make sure planes won't keep trying to pump pixels to us,
1889 	 * or we might hang the display.
1890 	 */
1891 	assert_planes_disabled(crtc);
1892 
1893 	trace_intel_pipe_disable(crtc);
1894 
1895 	reg = PIPECONF(cpu_transcoder);
1896 	val = I915_READ(reg);
1897 	if ((val & PIPECONF_ENABLE) == 0)
1898 		return;
1899 
1900 	/*
1901 	 * Double wide has implications for planes
1902 	 * so best keep it disabled when not needed.
1903 	 */
1904 	if (old_crtc_state->double_wide)
1905 		val &= ~PIPECONF_DOUBLE_WIDE;
1906 
	/* Don't disable the pipe or pipe PLLs if they are still needed (i830) */
1908 	if (!IS_I830(dev_priv))
1909 		val &= ~PIPECONF_ENABLE;
1910 
1911 	I915_WRITE(reg, val);
1912 	if ((val & PIPECONF_ENABLE) == 0)
1913 		intel_wait_for_pipe_off(old_crtc_state);
1914 }
1915 
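/* Size of a GTT tile in bytes: 2KiB on gen2, 4KiB (one page) on gen3+ */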
1916 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1917 {
1918 	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1919 }
1920 
1921 static unsigned int
1922 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1923 {
1924 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1925 	unsigned int cpp = fb->format->cpp[color_plane];
1926 
1927 	switch (fb->modifier) {
1928 	case DRM_FORMAT_MOD_LINEAR:
1929 		return intel_tile_size(dev_priv);
1930 	case I915_FORMAT_MOD_X_TILED:
1931 		if (IS_GEN(dev_priv, 2))
1932 			return 128;
1933 		else
1934 			return 512;
1935 	case I915_FORMAT_MOD_Y_TILED_CCS:
1936 		if (color_plane == 1)
1937 			return 128;
1938 		/* fall through */
1939 	case I915_FORMAT_MOD_Y_TILED:
1940 		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1941 			return 128;
1942 		else
1943 			return 512;
1944 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1945 		if (color_plane == 1)
1946 			return 128;
1947 		/* fall through */
1948 	case I915_FORMAT_MOD_Yf_TILED:
1949 		switch (cpp) {
1950 		case 1:
1951 			return 64;
1952 		case 2:
1953 		case 4:
1954 			return 128;
1955 		case 8:
1956 		case 16:
1957 			return 256;
1958 		default:
1959 			MISSING_CASE(cpp);
1960 			return cpp;
1961 		}
1962 		break;
1963 	default:
1964 		MISSING_CASE(fb->modifier);
1965 		return cpp;
1966 	}
1967 }
1968 
1969 static unsigned int
1970 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1971 {
1972 	return intel_tile_size(to_i915(fb->dev)) /
1973 		intel_tile_width_bytes(fb, color_plane);
1974 }
1975 
1976 /* Return the tile dimensions in pixel units */
1977 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1978 			    unsigned int *tile_width,
1979 			    unsigned int *tile_height)
1980 {
1981 	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1982 	unsigned int cpp = fb->format->cpp[color_plane];
1983 
1984 	*tile_width = tile_width_bytes / cpp;
1985 	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1986 }
1987 
1988 unsigned int
1989 intel_fb_align_height(const struct drm_framebuffer *fb,
1990 		      int color_plane, unsigned int height)
1991 {
1992 	unsigned int tile_height = intel_tile_height(fb, color_plane);
1993 
1994 	return ALIGN(height, tile_height);
1995 }
1996 
1997 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1998 {
1999 	unsigned int size = 0;
2000 	int i;
2001 
2002 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2003 		size += rot_info->plane[i].width * rot_info->plane[i].height;
2004 
2005 	return size;
2006 }
2007 
2008 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2009 {
2010 	unsigned int size = 0;
2011 	int i;
2012 
2013 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2014 		size += rem_info->plane[i].width * rem_info->plane[i].height;
2015 
2016 	return size;
2017 }
2018 
2019 static void
2020 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2021 			const struct drm_framebuffer *fb,
2022 			unsigned int rotation)
2023 {
2024 	view->type = I915_GGTT_VIEW_NORMAL;
2025 	if (drm_rotation_90_or_270(rotation)) {
2026 		view->type = I915_GGTT_VIEW_ROTATED;
2027 		view->rotated = to_intel_framebuffer(fb)->rot_info;
2028 	}
2029 }
2030 
2031 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2032 {
2033 	if (IS_I830(dev_priv))
2034 		return 16 * 1024;
2035 	else if (IS_I85X(dev_priv))
2036 		return 256;
2037 	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2038 		return 32;
2039 	else
2040 		return 4 * 1024;
2041 }
2042 
2043 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2044 {
2045 	if (INTEL_GEN(dev_priv) >= 9)
2046 		return 256 * 1024;
2047 	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2048 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2049 		return 128 * 1024;
2050 	else if (INTEL_GEN(dev_priv) >= 4)
2051 		return 4 * 1024;
2052 	else
2053 		return 0;
2054 }
2055 
2056 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2057 					 int color_plane)
2058 {
2059 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2060 
2061 	/* AUX_DIST needs only 4K alignment */
2062 	if (color_plane == 1)
2063 		return 4096;
2064 
2065 	switch (fb->modifier) {
2066 	case DRM_FORMAT_MOD_LINEAR:
2067 		return intel_linear_alignment(dev_priv);
2068 	case I915_FORMAT_MOD_X_TILED:
2069 		if (INTEL_GEN(dev_priv) >= 9)
2070 			return 256 * 1024;
2071 		return 0;
2072 	case I915_FORMAT_MOD_Y_TILED_CCS:
2073 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2074 	case I915_FORMAT_MOD_Y_TILED:
2075 	case I915_FORMAT_MOD_Yf_TILED:
2076 		return 1 * 1024 * 1024;
2077 	default:
2078 		MISSING_CASE(fb->modifier);
2079 		return 0;
2080 	}
2081 }
2082 
2083 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2084 {
2085 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2086 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2087 
2088 	return INTEL_GEN(dev_priv) < 4 ||
2089 		(plane->has_fbc &&
2090 		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2091 }
2092 
2093 struct i915_vma *
2094 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2095 			   const struct i915_ggtt_view *view,
2096 			   bool uses_fence,
2097 			   unsigned long *out_flags)
2098 {
2099 	struct drm_device *dev = fb->dev;
2100 	struct drm_i915_private *dev_priv = to_i915(dev);
2101 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2102 	intel_wakeref_t wakeref;
2103 	struct i915_vma *vma;
2104 	unsigned int pinctl;
2105 	u32 alignment;
2106 
2107 	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
2108 		return ERR_PTR(-EINVAL);
2109 
2110 	alignment = intel_surf_alignment(fb, 0);
2111 
2112 	/* Note that the w/a also requires 64 PTE of padding following the
2113 	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout, preventing
2115 	 * the VT-d warning.
2116 	 */
2117 	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2118 		alignment = 256 * 1024;
2119 
2120 	/*
2121 	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory, which means that there is no risk
2123 	 * that the register values disappear as soon as we call
2124 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2125 	 * pin/unpin/fence and not more.
2126 	 */
2127 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2128 	i915_gem_object_lock(obj);
2129 
2130 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2131 
2132 	pinctl = 0;
2133 
2134 	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
2136 	 * g4x display engine and that all earlier gen are similarly
2137 	 * limited. Testing suggests that it is a little more
2138 	 * complicated than this. For example, Cherryview appears quite
2139 	 * happy to scanout from anywhere within its global aperture.
2140 	 */
2141 	if (HAS_GMCH(dev_priv))
2142 		pinctl |= PIN_MAPPABLE;
2143 
2144 	vma = i915_gem_object_pin_to_display_plane(obj,
2145 						   alignment, view, pinctl);
2146 	if (IS_ERR(vma))
2147 		goto err;
2148 
2149 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2150 		int ret;
2151 
2152 		/* Install a fence for tiled scan-out. Pre-i965 always needs a
2153 		 * fence, whereas 965+ only requires a fence if using
2154 		 * framebuffer compression.  For simplicity, we always, when
2155 		 * possible, install a fence as the cost is not that onerous.
2156 		 *
2157 		 * If we fail to fence the tiled scanout, then either the
2158 		 * modeset will reject the change (which is highly unlikely as
2159 		 * the affected systems, all but one, do not have unmappable
2160 		 * space) or we will not be able to enable full powersaving
2161 		 * techniques (also likely not to apply due to various limits
2162 		 * FBC and the like impose on the size of the buffer, which
2163 		 * presumably we violated anyway with this unmappable buffer).
2164 		 * Anyway, it is presumably better to stumble onwards with
2165 		 * something and try to run the system in a "less than optimal"
2166 		 * mode that matches the user configuration.
2167 		 */
2168 		ret = i915_vma_pin_fence(vma);
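		/* Fence failure is fatal only on pre-gen4, where scanout requires it */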
2169 		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2170 			i915_gem_object_unpin_from_display_plane(vma);
2171 			vma = ERR_PTR(ret);
2172 			goto err;
2173 		}
2174 
2175 		if (ret == 0 && vma->fence)
2176 			*out_flags |= PLANE_HAS_FENCE;
2177 	}
2178 
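	/* Hold a reference for the caller; error paths jump past this */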
2179 	i915_vma_get(vma);
2180 err:
2181 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2182 
2183 	i915_gem_object_unlock(obj);
2184 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2185 	return vma;
2186 }
2187 
2188 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2189 {
2190 	i915_gem_object_lock(vma->obj);
2191 	if (flags & PLANE_HAS_FENCE)
2192 		i915_vma_unpin_fence(vma);
2193 	i915_gem_object_unpin_from_display_plane(vma);
2194 	i915_gem_object_unlock(vma->obj);
2195 
2196 	i915_vma_put(vma);
2197 }
2198 
2199 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2200 			  unsigned int rotation)
2201 {
2202 	if (drm_rotation_90_or_270(rotation))
2203 		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2204 	else
2205 		return fb->pitches[color_plane];
2206 }
2207 
2208 /*
2209  * Convert the x/y offsets into a linear offset.
2210  * Only valid with 0/180 degree rotation, which is fine since linear
2211  * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
2213  */
2214 u32 intel_fb_xy_to_linear(int x, int y,
2215 			  const struct intel_plane_state *state,
2216 			  int color_plane)
2217 {
2218 	const struct drm_framebuffer *fb = state->base.fb;
2219 	unsigned int cpp = fb->format->cpp[color_plane];
2220 	unsigned int pitch = state->color_plane[color_plane].stride;
2221 
2222 	return y * pitch + x * cpp;
2223 }
2224 
2225 /*
2226  * Add the x/y offsets derived from fb->offsets[] to the user
2227  * specified plane src x/y offsets. The resulting x/y offsets
2228  * specify the start of scanout from the beginning of the gtt mapping.
2229  */
2230 void intel_add_fb_offsets(int *x, int *y,
2231 			  const struct intel_plane_state *state,
2232 			  int color_plane)
2234 {
2235 	*x += state->color_plane[color_plane].x;
2236 	*y += state->color_plane[color_plane].y;
2237 }
2238 
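/*
 * Move the difference between two tile-aligned offsets
 * (new_offset <= old_offset) into the x/y offsets,
 * keeping x as small as possible.
 */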
2239 static u32 intel_adjust_tile_offset(int *x, int *y,
2240 				    unsigned int tile_width,
2241 				    unsigned int tile_height,
2242 				    unsigned int tile_size,
2243 				    unsigned int pitch_tiles,
2244 				    u32 old_offset,
2245 				    u32 new_offset)
2246 {
2247 	unsigned int pitch_pixels = pitch_tiles * tile_width;
2248 	unsigned int tiles;
2249 
2250 	WARN_ON(old_offset & (tile_size - 1));
2251 	WARN_ON(new_offset & (tile_size - 1));
2252 	WARN_ON(new_offset > old_offset);
2253 
2254 	tiles = (old_offset - new_offset) / tile_size;
2255 
2256 	*y += tiles / pitch_tiles * tile_height;
2257 	*x += tiles % pitch_tiles * tile_width;
2258 
2259 	/* minimize x in case it got needlessly big */
2260 	*y += *x / pitch_pixels * tile_height;
2261 	*x %= pitch_pixels;
2262 
2263 	return new_offset;
2264 }
2265 
2266 static bool is_surface_linear(u64 modifier, int color_plane)
2267 {
2268 	return modifier == DRM_FORMAT_MOD_LINEAR;
2269 }
2270 
2271 static u32 intel_adjust_aligned_offset(int *x, int *y,
2272 				       const struct drm_framebuffer *fb,
2273 				       int color_plane,
2274 				       unsigned int rotation,
2275 				       unsigned int pitch,
2276 				       u32 old_offset, u32 new_offset)
2277 {
2278 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2279 	unsigned int cpp = fb->format->cpp[color_plane];
2280 
2281 	WARN_ON(new_offset > old_offset);
2282 
2283 	if (!is_surface_linear(fb->modifier, color_plane)) {
2284 		unsigned int tile_size, tile_width, tile_height;
2285 		unsigned int pitch_tiles;
2286 
2287 		tile_size = intel_tile_size(dev_priv);
2288 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2289 
2290 		if (drm_rotation_90_or_270(rotation)) {
2291 			pitch_tiles = pitch / tile_height;
2292 			swap(tile_width, tile_height);
2293 		} else {
2294 			pitch_tiles = pitch / (tile_width * cpp);
2295 		}
2296 
2297 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2298 					 tile_size, pitch_tiles,
2299 					 old_offset, new_offset);
2300 	} else {
2301 		old_offset += *y * pitch + *x * cpp;
2302 
2303 		*y = (old_offset - new_offset) / pitch;
2304 		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
2305 	}
2306 
2307 	return new_offset;
2308 }
2309 
2310 /*
2311  * Adjust the tile offset by moving the difference into
2312  * the x/y offsets.
2313  */
2314 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2315 					     const struct intel_plane_state *state,
2316 					     int color_plane,
2317 					     u32 old_offset, u32 new_offset)
2318 {
2319 	return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2320 					   state->base.rotation,
2321 					   state->color_plane[color_plane].stride,
2322 					   old_offset, new_offset);
2323 }
2324 
2325 /*
2326  * Computes the aligned offset to the base tile and adjusts
2327  * x, y. bytes per pixel is assumed to be a power-of-two.
2328  *
2329  * In the 90/270 rotated case, x and y are assumed
2330  * to be already rotated to match the rotated GTT view, and
2331  * pitch is the tile_height aligned framebuffer height.
2332  *
2333  * This function is used when computing the derived information
2334  * under intel_framebuffer, so using any of that information
2335  * here is not allowed. Anything under drm_framebuffer can be
2336  * used. This is why the user has to pass in the pitch since it
2337  * is specified in the rotated orientation.
2338  */
2339 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2340 					int *x, int *y,
2341 					const struct drm_framebuffer *fb,
2342 					int color_plane,
2343 					unsigned int pitch,
2344 					unsigned int rotation,
2345 					u32 alignment)
2346 {
2347 	unsigned int cpp = fb->format->cpp[color_plane];
2348 	u32 offset, offset_aligned;
2349 
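	/* Convert the power-of-two alignment into a bitmask (0 = no alignment) */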
2350 	if (alignment)
2351 		alignment--;
2352 
2353 	if (!is_surface_linear(fb->modifier, color_plane)) {
2354 		unsigned int tile_size, tile_width, tile_height;
2355 		unsigned int tile_rows, tiles, pitch_tiles;
2356 
2357 		tile_size = intel_tile_size(dev_priv);
2358 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2359 
2360 		if (drm_rotation_90_or_270(rotation)) {
2361 			pitch_tiles = pitch / tile_height;
2362 			swap(tile_width, tile_height);
2363 		} else {
2364 			pitch_tiles = pitch / (tile_width * cpp);
2365 		}
2366 
2367 		tile_rows = *y / tile_height;
2368 		*y %= tile_height;
2369 
2370 		tiles = *x / tile_width;
2371 		*x %= tile_width;
2372 
2373 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2374 		offset_aligned = offset & ~alignment;
2375 
2376 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2377 					 tile_size, pitch_tiles,
2378 					 offset, offset_aligned);
2379 	} else {
2380 		offset = *y * pitch + *x * cpp;
2381 		offset_aligned = offset & ~alignment;
2382 
2383 		*y = (offset & alignment) / pitch;
2384 		*x = ((offset & alignment) - *y * pitch) / cpp;
2385 	}
2386 
2387 	return offset_aligned;
2388 }
2389 
2390 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2391 					      const struct intel_plane_state *state,
2392 					      int color_plane)
2393 {
2394 	struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2395 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2396 	const struct drm_framebuffer *fb = state->base.fb;
2397 	unsigned int rotation = state->base.rotation;
2398 	int pitch = state->color_plane[color_plane].stride;
2399 	u32 alignment;
2400 
2401 	if (intel_plane->id == PLANE_CURSOR)
2402 		alignment = intel_cursor_alignment(dev_priv);
2403 	else
2404 		alignment = intel_surf_alignment(fb, color_plane);
2405 
2406 	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2407 					    pitch, rotation, alignment);
2408 }
2409 
2410 /* Convert the fb->offset[] into x/y offsets */
2411 static int intel_fb_offset_to_xy(int *x, int *y,
2412 				 const struct drm_framebuffer *fb,
2413 				 int color_plane)
2414 {
2415 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2416 	unsigned int height;
2417 
2418 	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2419 	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2420 		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2421 			      fb->offsets[color_plane], color_plane);
2422 		return -EINVAL;
2423 	}
2424 
2425 	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2426 	height = ALIGN(height, intel_tile_height(fb, color_plane));
2427 
2428 	/* Catch potential overflows early */
2429 	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2430 			    fb->offsets[color_plane])) {
2431 		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2432 			      fb->offsets[color_plane], fb->pitches[color_plane],
2433 			      color_plane);
2434 		return -ERANGE;
2435 	}
2436 
2437 	*x = 0;
2438 	*y = 0;
2439 
2440 	intel_adjust_aligned_offset(x, y,
2441 				    fb, color_plane, DRM_MODE_ROTATE_0,
2442 				    fb->pitches[color_plane],
2443 				    fb->offsets[color_plane], 0);
2444 
2445 	return 0;
2446 }
2447 
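/*
 * Fence registers only support X and Y major tiling, so Yf (and any
 * other modifier without a fence mode) maps to I915_TILING_NONE.
 */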
2448 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2449 {
2450 	switch (fb_modifier) {
2451 	case I915_FORMAT_MOD_X_TILED:
2452 		return I915_TILING_X;
2453 	case I915_FORMAT_MOD_Y_TILED:
2454 	case I915_FORMAT_MOD_Y_TILED_CCS:
2455 		return I915_TILING_Y;
2456 	default:
2457 		return I915_TILING_NONE;
2458 	}
2459 }
2460 
2461 /*
2462  * From the Sky Lake PRM:
2463  * "The Color Control Surface (CCS) contains the compression status of
2464  *  the cache-line pairs. The compression state of the cache-line pair
2465  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16x16 sets of 128 byte Y-tiled
2467  *  cache-line-pairs. CCS is always Y tiled."
2468  *
2469  * Since cache line pairs refers to horizontally adjacent cache lines,
2470  * each cache line in the CCS corresponds to an area of 32x16 cache
2471  * lines on the main surface. Since each pixel is 4 bytes, this gives
2472  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2473  * main surface.
2474  */
2475 static const struct drm_format_info ccs_formats[] = {
2476 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2477 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2478 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2479 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2480 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2481 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2482 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2483 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2484 };
2485 
2486 static const struct drm_format_info *
2487 lookup_format_info(const struct drm_format_info formats[],
2488 		   int num_formats, u32 format)
2489 {
2490 	int i;
2491 
2492 	for (i = 0; i < num_formats; i++) {
2493 		if (formats[i].format == format)
2494 			return &formats[i];
2495 	}
2496 
2497 	return NULL;
2498 }
2499 
2500 static const struct drm_format_info *
2501 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2502 {
2503 	switch (cmd->modifier[0]) {
2504 	case I915_FORMAT_MOD_Y_TILED_CCS:
2505 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2506 		return lookup_format_info(ccs_formats,
2507 					  ARRAY_SIZE(ccs_formats),
2508 					  cmd->pixel_format);
2509 	default:
2510 		return NULL;
2511 	}
2512 }
2513 
2514 bool is_ccs_modifier(u64 modifier)
2515 {
2516 	return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2517 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2518 }
2519 
2520 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2521 			      u32 pixel_format, u64 modifier)
2522 {
2523 	struct intel_crtc *crtc;
2524 	struct intel_plane *plane;
2525 
2526 	/*
2527 	 * We assume the primary plane for pipe A has
2528 	 * the highest stride limits of them all.
2529 	 */
2530 	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2531 	plane = to_intel_plane(crtc->base.primary);
2532 
2533 	return plane->max_stride(plane, pixel_format, modifier,
2534 				 DRM_MODE_ROTATE_0);
2535 }
2536 
2537 static
2538 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2539 			u32 pixel_format, u64 modifier)
2540 {
2541 	/*
2542 	 * Arbitrary limit for gen4+ chosen to match the
2543 	 * render engine max stride.
2544 	 *
2545 	 * The new CCS hash mode makes remapping impossible
2546 	 */
2547 	if (!is_ccs_modifier(modifier)) {
		if (INTEL_GEN(dev_priv) >= 7)
			return 256 * 1024;
		else if (INTEL_GEN(dev_priv) >= 4)
			return 128 * 1024;
2552 	}
2553 
2554 	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2555 }
2556 
2557 static u32
2558 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2559 {
2560 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2561 
2562 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2563 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2564 							   fb->format->format,
2565 							   fb->modifier);
2566 
2567 		/*
2568 		 * To make remapping with linear generally feasible
2569 		 * we need the stride to be page aligned.
2570 		 */
2571 		if (fb->pitches[color_plane] > max_stride)
2572 			return intel_tile_size(dev_priv);
2573 		else
2574 			return 64;
2575 	} else {
2576 		return intel_tile_width_bytes(fb, color_plane);
2577 	}
2578 }
2579 
2580 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2581 {
2582 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2583 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2584 	const struct drm_framebuffer *fb = plane_state->base.fb;
2585 	int i;
2586 
2587 	/* We don't want to deal with remapping with cursors */
2588 	if (plane->id == PLANE_CURSOR)
2589 		return false;
2590 
2591 	/*
2592 	 * The display engine limits already match/exceed the
2593 	 * render engine limits, so not much point in remapping.
2594 	 * Would also need to deal with the fence POT alignment
2595 	 * and gen2 2KiB GTT tile size.
2596 	 */
2597 	if (INTEL_GEN(dev_priv) < 4)
2598 		return false;
2599 
2600 	/*
2601 	 * The new CCS hash mode isn't compatible with remapping as
2602 	 * the virtual address of the pages affects the compressed data.
2603 	 */
2604 	if (is_ccs_modifier(fb->modifier))
2605 		return false;
2606 
2607 	/* Linear needs a page aligned stride for remapping */
2608 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2609 		unsigned int alignment = intel_tile_size(dev_priv) - 1;
2610 
2611 		for (i = 0; i < fb->format->num_planes; i++) {
2612 			if (fb->pitches[i] & alignment)
2613 				return false;
2614 		}
2615 	}
2616 
2617 	return true;
2618 }
2619 
2620 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2621 {
2622 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2623 	const struct drm_framebuffer *fb = plane_state->base.fb;
2624 	unsigned int rotation = plane_state->base.rotation;
2625 	u32 stride, max_stride;
2626 
2627 	/*
2628 	 * No remapping for invisible planes since we don't have
2629 	 * an actual source viewport to remap.
2630 	 */
2631 	if (!plane_state->base.visible)
2632 		return false;
2633 
2634 	if (!intel_plane_can_remap(plane_state))
2635 		return false;
2636 
2637 	/*
2638 	 * FIXME: aux plane limits on gen9+ are
	 * unclear in Bspec, so no checking for now.
2640 	 */
2641 	stride = intel_fb_pitch(fb, 0, rotation);
2642 	max_stride = plane->max_stride(plane, fb->format->format,
2643 				       fb->modifier, rotation);
2644 
2645 	return stride > max_stride;
2646 }
2647 
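/*
 * Precompute the normal and 90/270 rotated view layout (x/y offsets
 * and tile strides) for each color plane, and check that the fb
 * actually fits within the backing object.
 */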
2648 static int
2649 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2650 		   struct drm_framebuffer *fb)
2651 {
2652 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2653 	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2654 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2655 	u32 gtt_offset_rotated = 0;
2656 	unsigned int max_size = 0;
2657 	int i, num_planes = fb->format->num_planes;
2658 	unsigned int tile_size = intel_tile_size(dev_priv);
2659 
2660 	for (i = 0; i < num_planes; i++) {
2661 		unsigned int width, height;
2662 		unsigned int cpp, size;
2663 		u32 offset;
2664 		int x, y;
2665 		int ret;
2666 
2667 		cpp = fb->format->cpp[i];
2668 		width = drm_framebuffer_plane_width(fb->width, fb, i);
2669 		height = drm_framebuffer_plane_height(fb->height, fb, i);
2670 
2671 		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2672 		if (ret) {
2673 			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2674 				      i, fb->offsets[i]);
2675 			return ret;
2676 		}
2677 
2678 		if (is_ccs_modifier(fb->modifier) && i == 1) {
2679 			int hsub = fb->format->hsub;
2680 			int vsub = fb->format->vsub;
			unsigned int tile_width, tile_height;
2682 			int main_x, main_y;
2683 			int ccs_x, ccs_y;
2684 
2685 			intel_tile_dims(fb, i, &tile_width, &tile_height);
2686 			tile_width *= hsub;
2687 			tile_height *= vsub;
2688 
2689 			ccs_x = (x * hsub) % tile_width;
2690 			ccs_y = (y * vsub) % tile_height;
2691 			main_x = intel_fb->normal[0].x % tile_width;
2692 			main_y = intel_fb->normal[0].y % tile_height;
2693 
2694 			/*
2695 			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2696 			 * x/y offsets must match between CCS and the main surface.
2697 			 */
2698 			if (main_x != ccs_x || main_y != ccs_y) {
2699 				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2700 					      main_x, main_y,
2701 					      ccs_x, ccs_y,
2702 					      intel_fb->normal[0].x,
2703 					      intel_fb->normal[0].y,
2704 					      x, y);
2705 				return -EINVAL;
2706 			}
2707 		}
2708 
2709 		/*
2710 		 * The fence (if used) is aligned to the start of the object
2711 		 * so having the framebuffer wrap around across the edge of the
2712 		 * fenced region doesn't really work. We have no API to configure
2713 		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's easier to just require that the
2715 		 * fb layout agrees with the fence layout. We already check that the
2716 		 * fb stride matches the fence stride elsewhere.
2717 		 */
2718 		if (i == 0 && i915_gem_object_is_tiled(obj) &&
2719 		    (x + width) * cpp > fb->pitches[i]) {
2720 			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2721 				      i, fb->offsets[i]);
2722 			return -EINVAL;
2723 		}
2724 
2725 		/*
2726 		 * First pixel of the framebuffer from
2727 		 * the start of the normal gtt mapping.
2728 		 */
2729 		intel_fb->normal[i].x = x;
2730 		intel_fb->normal[i].y = y;
2731 
2732 		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2733 						      fb->pitches[i],
2734 						      DRM_MODE_ROTATE_0,
2735 						      tile_size);
2736 		offset /= tile_size;
2737 
2738 		if (!is_surface_linear(fb->modifier, i)) {
2739 			unsigned int tile_width, tile_height;
2740 			unsigned int pitch_tiles;
2741 			struct drm_rect r;
2742 
2743 			intel_tile_dims(fb, i, &tile_width, &tile_height);
2744 
2745 			rot_info->plane[i].offset = offset;
2746 			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2747 			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2748 			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2749 
2750 			intel_fb->rotated[i].pitch =
2751 				rot_info->plane[i].height * tile_height;
2752 
2753 			/* how many tiles does this plane need */
2754 			size = rot_info->plane[i].stride * rot_info->plane[i].height;
2755 			/*
2756 			 * If the plane isn't horizontally tile aligned,
2757 			 * we need one more tile.
2758 			 */
2759 			if (x != 0)
2760 				size++;
2761 
2762 			/* rotate the x/y offsets to match the GTT view */
2763 			drm_rect_init(&r, x, y, width, height);
2764 			drm_rect_rotate(&r,
2765 					rot_info->plane[i].width * tile_width,
2766 					rot_info->plane[i].height * tile_height,
2767 					DRM_MODE_ROTATE_270);
2768 			x = r.x1;
2769 			y = r.y1;
2770 
2771 			/* rotate the tile dimensions to match the GTT view */
2772 			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2773 			swap(tile_width, tile_height);
2774 
2775 			/*
2776 			 * We only keep the x/y offsets, so push all of the
2777 			 * gtt offset into the x/y offsets.
2778 			 */
2779 			intel_adjust_tile_offset(&x, &y,
2780 						 tile_width, tile_height,
2781 						 tile_size, pitch_tiles,
2782 						 gtt_offset_rotated * tile_size, 0);
2783 
2784 			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2785 
2786 			/*
2787 			 * First pixel of the framebuffer from
2788 			 * the start of the rotated gtt mapping.
2789 			 */
2790 			intel_fb->rotated[i].x = x;
2791 			intel_fb->rotated[i].y = y;
2792 		} else {
2793 			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2794 					    x * cpp, tile_size);
2795 		}
2796 
2797 		/* how many tiles in total needed in the bo */
2798 		max_size = max(max_size, offset + size);
2799 	}
2800 
2801 	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2802 		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2803 			      mul_u32_u32(max_size, tile_size), obj->base.size);
2804 		return -EINVAL;
2805 	}
2806 
2807 	return 0;
2808 }
2809 
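/*
 * Build a remapped (or rotated) GTT view for the plane and rewrite the
 * per color plane stride/x/y offsets so that the effective stride fits
 * within the display engine limits.
 */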
2810 static void
2811 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2812 {
2813 	struct drm_i915_private *dev_priv =
2814 		to_i915(plane_state->base.plane->dev);
2815 	struct drm_framebuffer *fb = plane_state->base.fb;
2816 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2817 	struct intel_rotation_info *info = &plane_state->view.rotated;
2818 	unsigned int rotation = plane_state->base.rotation;
2819 	int i, num_planes = fb->format->num_planes;
2820 	unsigned int tile_size = intel_tile_size(dev_priv);
2821 	unsigned int src_x, src_y;
2822 	unsigned int src_w, src_h;
2823 	u32 gtt_offset = 0;
2824 
2825 	memset(&plane_state->view, 0, sizeof(plane_state->view));
2826 	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2827 		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2828 
2829 	src_x = plane_state->base.src.x1 >> 16;
2830 	src_y = plane_state->base.src.y1 >> 16;
2831 	src_w = drm_rect_width(&plane_state->base.src) >> 16;
2832 	src_h = drm_rect_height(&plane_state->base.src) >> 16;
2833 
2834 	WARN_ON(is_ccs_modifier(fb->modifier));
2835 
2836 	/* Make src coordinates relative to the viewport */
2837 	drm_rect_translate(&plane_state->base.src,
2838 			   -(src_x << 16), -(src_y << 16));
2839 
2840 	/* Rotate src coordinates to match rotated GTT view */
2841 	if (drm_rotation_90_or_270(rotation))
2842 		drm_rect_rotate(&plane_state->base.src,
2843 				src_w << 16, src_h << 16,
2844 				DRM_MODE_ROTATE_270);
2845 
2846 	for (i = 0; i < num_planes; i++) {
2847 		unsigned int hsub = i ? fb->format->hsub : 1;
2848 		unsigned int vsub = i ? fb->format->vsub : 1;
2849 		unsigned int cpp = fb->format->cpp[i];
2850 		unsigned int tile_width, tile_height;
2851 		unsigned int width, height;
2852 		unsigned int pitch_tiles;
2853 		unsigned int x, y;
2854 		u32 offset;
2855 
2856 		intel_tile_dims(fb, i, &tile_width, &tile_height);
2857 
2858 		x = src_x / hsub;
2859 		y = src_y / vsub;
2860 		width = src_w / hsub;
2861 		height = src_h / vsub;
2862 
2863 		/*
2864 		 * First pixel of the src viewport from the
2865 		 * start of the normal gtt mapping.
2866 		 */
2867 		x += intel_fb->normal[i].x;
2868 		y += intel_fb->normal[i].y;
2869 
2870 		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2871 						      fb, i, fb->pitches[i],
2872 						      DRM_MODE_ROTATE_0, tile_size);
2873 		offset /= tile_size;
2874 
2875 		info->plane[i].offset = offset;
2876 		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2877 						     tile_width * cpp);
2878 		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2879 		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2880 
2881 		if (drm_rotation_90_or_270(rotation)) {
2882 			struct drm_rect r;
2883 
2884 			/* rotate the x/y offsets to match the GTT view */
2885 			drm_rect_init(&r, x, y, width, height);
2886 			drm_rect_rotate(&r,
2887 					info->plane[i].width * tile_width,
2888 					info->plane[i].height * tile_height,
2889 					DRM_MODE_ROTATE_270);
2890 			x = r.x1;
2891 			y = r.y1;
2892 
2893 			pitch_tiles = info->plane[i].height;
2894 			plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2895 
2896 			/* rotate the tile dimensions to match the GTT view */
2897 			swap(tile_width, tile_height);
2898 		} else {
2899 			pitch_tiles = info->plane[i].width;
2900 			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2901 		}
2902 
2903 		/*
2904 		 * We only keep the x/y offsets, so push all of the
2905 		 * gtt offset into the x/y offsets.
2906 		 */
2907 		intel_adjust_tile_offset(&x, &y,
2908 					 tile_width, tile_height,
2909 					 tile_size, pitch_tiles,
2910 					 gtt_offset * tile_size, 0);
2911 
2912 		gtt_offset += info->plane[i].width * info->plane[i].height;
2913 
2914 		plane_state->color_plane[i].offset = 0;
2915 		plane_state->color_plane[i].x = x;
2916 		plane_state->color_plane[i].y = y;
2917 	}
2918 }
2919 
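/*
 * Fill in the GTT view and the per color plane stride/x/y offsets for
 * the plane, remapping through the GTT when the fb stride exceeds
 * what the plane can handle directly.
 */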
2920 static int
2921 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2922 {
2923 	const struct intel_framebuffer *fb =
2924 		to_intel_framebuffer(plane_state->base.fb);
2925 	unsigned int rotation = plane_state->base.rotation;
2926 	int i, num_planes;
2927 
2928 	if (!fb)
2929 		return 0;
2930 
2931 	num_planes = fb->base.format->num_planes;
2932 
2933 	if (intel_plane_needs_remap(plane_state)) {
2934 		intel_plane_remap_gtt(plane_state);
2935 
2936 		/*
2937 		 * Sometimes even remapping can't overcome
		 * the stride limitations :( This can happen with
2939 		 * big plane sizes and suitably misaligned
2940 		 * offsets.
2941 		 */
2942 		return intel_plane_check_stride(plane_state);
2943 	}
2944 
2945 	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2946 
2947 	for (i = 0; i < num_planes; i++) {
2948 		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2949 		plane_state->color_plane[i].offset = 0;
2950 
2951 		if (drm_rotation_90_or_270(rotation)) {
2952 			plane_state->color_plane[i].x = fb->rotated[i].x;
2953 			plane_state->color_plane[i].y = fb->rotated[i].y;
2954 		} else {
2955 			plane_state->color_plane[i].x = fb->normal[i].x;
2956 			plane_state->color_plane[i].y = fb->normal[i].y;
2957 		}
2958 	}
2959 
2960 	/* Rotate src coordinates to match rotated GTT view */
2961 	if (drm_rotation_90_or_270(rotation))
2962 		drm_rect_rotate(&plane_state->base.src,
2963 				fb->base.width << 16, fb->base.height << 16,
2964 				DRM_MODE_ROTATE_270);
2965 
2966 	return intel_plane_check_stride(plane_state);
2967 }
2968 
2969 static int i9xx_format_to_fourcc(int format)
2970 {
2971 	switch (format) {
2972 	case DISPPLANE_8BPP:
2973 		return DRM_FORMAT_C8;
2974 	case DISPPLANE_BGRX555:
2975 		return DRM_FORMAT_XRGB1555;
2976 	case DISPPLANE_BGRX565:
2977 		return DRM_FORMAT_RGB565;
2978 	default:
2979 	case DISPPLANE_BGRX888:
2980 		return DRM_FORMAT_XRGB8888;
2981 	case DISPPLANE_RGBX888:
2982 		return DRM_FORMAT_XBGR8888;
2983 	case DISPPLANE_BGRX101010:
2984 		return DRM_FORMAT_XRGB2101010;
2985 	case DISPPLANE_RGBX101010:
2986 		return DRM_FORMAT_XBGR2101010;
2987 	case DISPPLANE_RGBX161616:
2988 		return DRM_FORMAT_XBGR16161616F;
2989 	}
2990 }
2991 
2992 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2993 {
2994 	switch (format) {
2995 	case PLANE_CTL_FORMAT_RGB_565:
2996 		return DRM_FORMAT_RGB565;
2997 	case PLANE_CTL_FORMAT_NV12:
2998 		return DRM_FORMAT_NV12;
2999 	case PLANE_CTL_FORMAT_P010:
3000 		return DRM_FORMAT_P010;
3001 	case PLANE_CTL_FORMAT_P012:
3002 		return DRM_FORMAT_P012;
3003 	case PLANE_CTL_FORMAT_P016:
3004 		return DRM_FORMAT_P016;
3005 	case PLANE_CTL_FORMAT_Y210:
3006 		return DRM_FORMAT_Y210;
3007 	case PLANE_CTL_FORMAT_Y212:
3008 		return DRM_FORMAT_Y212;
3009 	case PLANE_CTL_FORMAT_Y216:
3010 		return DRM_FORMAT_Y216;
3011 	case PLANE_CTL_FORMAT_Y410:
3012 		return DRM_FORMAT_XVYU2101010;
3013 	case PLANE_CTL_FORMAT_Y412:
3014 		return DRM_FORMAT_XVYU12_16161616;
3015 	case PLANE_CTL_FORMAT_Y416:
3016 		return DRM_FORMAT_XVYU16161616;
3017 	default:
3018 	case PLANE_CTL_FORMAT_XRGB_8888:
3019 		if (rgb_order) {
3020 			if (alpha)
3021 				return DRM_FORMAT_ABGR8888;
3022 			else
3023 				return DRM_FORMAT_XBGR8888;
3024 		} else {
3025 			if (alpha)
3026 				return DRM_FORMAT_ARGB8888;
3027 			else
3028 				return DRM_FORMAT_XRGB8888;
3029 		}
3030 	case PLANE_CTL_FORMAT_XRGB_2101010:
3031 		if (rgb_order)
3032 			return DRM_FORMAT_XBGR2101010;
3033 		else
3034 			return DRM_FORMAT_XRGB2101010;
3035 	case PLANE_CTL_FORMAT_XRGB_16161616F:
3036 		if (rgb_order) {
3037 			if (alpha)
3038 				return DRM_FORMAT_ABGR16161616F;
3039 			else
3040 				return DRM_FORMAT_XBGR16161616F;
3041 		} else {
3042 			if (alpha)
3043 				return DRM_FORMAT_ARGB16161616F;
3044 			else
3045 				return DRM_FORMAT_XRGB16161616F;
3046 		}
3047 	}
3048 }
3049 
3050 static bool
3051 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3052 			      struct intel_initial_plane_config *plane_config)
3053 {
3054 	struct drm_device *dev = crtc->base.dev;
3055 	struct drm_i915_private *dev_priv = to_i915(dev);
3056 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3057 	struct drm_framebuffer *fb = &plane_config->fb->base;
3058 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3059 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
3060 				    PAGE_SIZE);
3061 	struct drm_i915_gem_object *obj;
3062 	bool ret = false;
3063 
3064 	size_aligned -= base_aligned;
3065 
3066 	if (plane_config->size == 0)
3067 		return false;
3068 
3069 	/* If the FB is too big, just don't use it since fbdev is not very
3070 	 * important and we should probably use that space with FBC or other
3071 	 * features. */
3072 	if (size_aligned * 2 > dev_priv->stolen_usable_size)
3073 		return false;
3074 
3075 	switch (fb->modifier) {
3076 	case DRM_FORMAT_MOD_LINEAR:
3077 	case I915_FORMAT_MOD_X_TILED:
3078 	case I915_FORMAT_MOD_Y_TILED:
3079 		break;
3080 	default:
3081 		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3082 				 fb->modifier);
3083 		return false;
3084 	}
3085 
3086 	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3087 							     base_aligned,
3088 							     base_aligned,
3089 							     size_aligned);
3090 	if (IS_ERR(obj))
3091 		return false;
3092 
3093 	switch (plane_config->tiling) {
3094 	case I915_TILING_NONE:
3095 		break;
3096 	case I915_TILING_X:
3097 	case I915_TILING_Y:
3098 		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3099 		break;
3100 	default:
3101 		MISSING_CASE(plane_config->tiling);
3102 		goto out;
3103 	}
3104 
3105 	mode_cmd.pixel_format = fb->format->format;
3106 	mode_cmd.width = fb->width;
3107 	mode_cmd.height = fb->height;
3108 	mode_cmd.pitches[0] = fb->pitches[0];
3109 	mode_cmd.modifier[0] = fb->modifier;
3110 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3111 
3112 	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3113 		DRM_DEBUG_KMS("intel fb init failed\n");
3114 		goto out;
3115 	}
3116 
3118 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3119 	ret = true;
3120 out:
3121 	i915_gem_object_put(obj);
3122 	return ret;
3123 }
3124 
3125 static void
3126 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3127 			struct intel_plane_state *plane_state,
3128 			bool visible)
3129 {
3130 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3131 
3132 	plane_state->base.visible = visible;
3133 
3134 	if (visible)
3135 		crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3136 	else
3137 		crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3138 }
3139 
3140 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3141 {
3142 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3143 	struct drm_plane *plane;
3144 
3145 	/*
3146 	 * Active_planes aliases if multiple "primary" or cursor planes
3147 	 * have been used on the same (or wrong) pipe. plane_mask uses
3148 	 * unique ids, hence we can use that to reconstruct active_planes.
3149 	 */
3150 	crtc_state->active_planes = 0;
3151 
3152 	drm_for_each_plane_mask(plane, &dev_priv->drm,
3153 				crtc_state->base.plane_mask)
3154 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3155 }
3156 
3157 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3158 					 struct intel_plane *plane)
3159 {
3160 	struct intel_crtc_state *crtc_state =
3161 		to_intel_crtc_state(crtc->base.state);
3162 	struct intel_plane_state *plane_state =
3163 		to_intel_plane_state(plane->base.state);
3164 
3165 	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3166 		      plane->base.base.id, plane->base.name,
3167 		      crtc->base.base.id, crtc->base.name);
3168 
3169 	intel_set_plane_visible(crtc_state, plane_state, false);
3170 	fixup_active_planes(crtc_state);
3171 	crtc_state->data_rate[plane->id] = 0;
3172 	crtc_state->min_cdclk[plane->id] = 0;
3173 
3174 	if (plane->id == PLANE_PRIMARY)
3175 		intel_pre_disable_primary_noatomic(&crtc->base);
3176 
3177 	intel_disable_plane(plane, crtc_state);
3178 }
3179 
3180 static struct intel_frontbuffer *
3181 to_intel_frontbuffer(struct drm_framebuffer *fb)
3182 {
3183 	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3184 }
3185 
3186 static void
3187 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3188 			     struct intel_initial_plane_config *plane_config)
3189 {
3190 	struct drm_device *dev = intel_crtc->base.dev;
3191 	struct drm_i915_private *dev_priv = to_i915(dev);
3192 	struct drm_crtc *c;
3193 	struct drm_plane *primary = intel_crtc->base.primary;
3194 	struct drm_plane_state *plane_state = primary->state;
3195 	struct intel_plane *intel_plane = to_intel_plane(primary);
3196 	struct intel_plane_state *intel_state =
3197 		to_intel_plane_state(plane_state);
3198 	struct drm_framebuffer *fb;
3199 
3200 	if (!plane_config->fb)
3201 		return;
3202 
3203 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3204 		fb = &plane_config->fb->base;
3205 		goto valid_fb;
3206 	}
3207 
3208 	kfree(plane_config->fb);
3209 
3210 	/*
3211 	 * Failed to alloc the obj, check to see if we should share
3212 	 * an fb with another CRTC instead
3213 	 */
3214 	for_each_crtc(dev, c) {
3215 		struct intel_plane_state *state;
3216 
3217 		if (c == &intel_crtc->base)
3218 			continue;
3219 
3220 		if (!to_intel_crtc(c)->active)
3221 			continue;
3222 
3223 		state = to_intel_plane_state(c->primary->state);
3224 		if (!state->vma)
3225 			continue;
3226 
3227 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
3228 			fb = state->base.fb;
3229 			drm_framebuffer_get(fb);
3230 			goto valid_fb;
3231 		}
3232 	}
3233 
3234 	/*
3235 	 * We've failed to reconstruct the BIOS FB.  Current display state
3236 	 * indicates that the primary plane is visible, but has a NULL FB,
3237 	 * which will lead to problems later if we don't fix it up.  The
3238 	 * simplest solution is to just disable the primary plane now and
3239 	 * pretend the BIOS never had it enabled.
3240 	 */
3241 	intel_plane_disable_noatomic(intel_crtc, intel_plane);
3242 
3243 	return;
3244 
3245 valid_fb:
3246 	intel_state->base.rotation = plane_config->rotation;
3247 	intel_fill_fb_ggtt_view(&intel_state->view, fb,
3248 				intel_state->base.rotation);
3249 	intel_state->color_plane[0].stride =
3250 		intel_fb_pitch(fb, 0, intel_state->base.rotation);
3251 
3252 	intel_state->vma =
3253 		intel_pin_and_fence_fb_obj(fb,
3254 					   &intel_state->view,
3255 					   intel_plane_uses_fence(intel_state),
3256 					   &intel_state->flags);
3257 	if (IS_ERR(intel_state->vma)) {
3258 		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3259 			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
3260 
3261 		intel_state->vma = NULL;
3262 		drm_framebuffer_put(fb);
3263 		return;
3264 	}
3265 
3266 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
3267 
3268 	plane_state->src_x = 0;
3269 	plane_state->src_y = 0;
3270 	plane_state->src_w = fb->width << 16;
3271 	plane_state->src_h = fb->height << 16;
3272 
3273 	plane_state->crtc_x = 0;
3274 	plane_state->crtc_y = 0;
3275 	plane_state->crtc_w = fb->width;
3276 	plane_state->crtc_h = fb->height;
3277 
3278 	intel_state->base.src = drm_plane_state_src(plane_state);
3279 	intel_state->base.dst = drm_plane_state_dest(plane_state);
3280 
3281 	if (plane_config->tiling)
3282 		dev_priv->preserve_bios_swizzle = true;
3283 
3284 	plane_state->fb = fb;
3285 	plane_state->crtc = &intel_crtc->base;
3286 
3287 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3288 		  &to_intel_frontbuffer(fb)->bits);
3289 }
3290 
3291 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3292 			       int color_plane,
3293 			       unsigned int rotation)
3294 {
3295 	int cpp = fb->format->cpp[color_plane];
3296 
3297 	switch (fb->modifier) {
3298 	case DRM_FORMAT_MOD_LINEAR:
3299 	case I915_FORMAT_MOD_X_TILED:
3300 		/*
		 * The validated limit is 4k, but 5k should
		 * work apart from the following features:
3303 		 * - Ytile (already limited to 4k)
3304 		 * - FP16 (already limited to 4k)
3305 		 * - render compression (already limited to 4k)
3306 		 * - KVMR sprite and cursor (don't care)
3307 		 * - horizontal panning (TODO verify this)
3308 		 * - pipe and plane scaling (TODO verify this)
3309 		 */
3310 		if (cpp == 8)
3311 			return 4096;
3312 		else
3313 			return 5120;
3314 	case I915_FORMAT_MOD_Y_TILED_CCS:
3315 	case I915_FORMAT_MOD_Yf_TILED_CCS:
3316 		/* FIXME AUX plane? */
3317 	case I915_FORMAT_MOD_Y_TILED:
3318 	case I915_FORMAT_MOD_Yf_TILED:
3319 		if (cpp == 8)
3320 			return 2048;
3321 		else
3322 			return 4096;
3323 	default:
3324 		MISSING_CASE(fb->modifier);
3325 		return 2048;
3326 	}
3327 }
3328 
3329 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3330 			       int color_plane,
3331 			       unsigned int rotation)
3332 {
3333 	int cpp = fb->format->cpp[color_plane];
3334 
3335 	switch (fb->modifier) {
3336 	case DRM_FORMAT_MOD_LINEAR:
3337 	case I915_FORMAT_MOD_X_TILED:
3338 		if (cpp == 8)
3339 			return 4096;
3340 		else
3341 			return 5120;
3342 	case I915_FORMAT_MOD_Y_TILED_CCS:
3343 	case I915_FORMAT_MOD_Yf_TILED_CCS:
3344 		/* FIXME AUX plane? */
3345 	case I915_FORMAT_MOD_Y_TILED:
3346 	case I915_FORMAT_MOD_Yf_TILED:
3347 		if (cpp == 8)
3348 			return 2048;
3349 		else
3350 			return 5120;
3351 	default:
3352 		MISSING_CASE(fb->modifier);
3353 		return 2048;
3354 	}
3355 }
3356 
3357 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3358 			       int color_plane,
3359 			       unsigned int rotation)
3360 {
3361 	return 5120;
3362 }
3363 
3364 static int skl_max_plane_height(void)
3365 {
3366 	return 4096;
3367 }
3368 
3369 static int icl_max_plane_height(void)
3370 {
3371 	return 4320;
3372 }
3373 
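/*
 * The CCS AUX surface has no x/y offset registers of its own, so walk
 * its offset down one alignment step at a time until the intra-tile
 * x/y offsets match those of the main surface.
 */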
3374 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3375 					   int main_x, int main_y, u32 main_offset)
3376 {
3377 	const struct drm_framebuffer *fb = plane_state->base.fb;
3378 	int hsub = fb->format->hsub;
3379 	int vsub = fb->format->vsub;
3380 	int aux_x = plane_state->color_plane[1].x;
3381 	int aux_y = plane_state->color_plane[1].y;
3382 	u32 aux_offset = plane_state->color_plane[1].offset;
3383 	u32 alignment = intel_surf_alignment(fb, 1);
3384 
3385 	while (aux_offset >= main_offset && aux_y <= main_y) {
3386 		int x, y;
3387 
3388 		if (aux_x == main_x && aux_y == main_y)
3389 			break;
3390 
3391 		if (aux_offset == 0)
3392 			break;
3393 
3394 		x = aux_x / hsub;
3395 		y = aux_y / vsub;
3396 		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3397 							       aux_offset, aux_offset - alignment);
3398 		aux_x = x * hsub + aux_x % hsub;
3399 		aux_y = y * vsub + aux_y % vsub;
3400 	}
3401 
3402 	if (aux_x != main_x || aux_y != main_y)
3403 		return false;
3404 
3405 	plane_state->color_plane[1].offset = aux_offset;
3406 	plane_state->color_plane[1].x = aux_x;
3407 	plane_state->color_plane[1].y = aux_y;
3408 
3409 	return true;
3410 }
3411 
3412 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3413 {
3414 	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
3415 	const struct drm_framebuffer *fb = plane_state->base.fb;
3416 	unsigned int rotation = plane_state->base.rotation;
3417 	int x = plane_state->base.src.x1 >> 16;
3418 	int y = plane_state->base.src.y1 >> 16;
3419 	int w = drm_rect_width(&plane_state->base.src) >> 16;
3420 	int h = drm_rect_height(&plane_state->base.src) >> 16;
3421 	int max_width;
3422 	int max_height;
3423 	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3424 
3425 	if (INTEL_GEN(dev_priv) >= 11)
3426 		max_width = icl_max_plane_width(fb, 0, rotation);
3427 	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3428 		max_width = glk_max_plane_width(fb, 0, rotation);
3429 	else
3430 		max_width = skl_max_plane_width(fb, 0, rotation);
3431 
3432 	if (INTEL_GEN(dev_priv) >= 11)
3433 		max_height = icl_max_plane_height();
3434 	else
3435 		max_height = skl_max_plane_height();
3436 
3437 	if (w > max_width || h > max_height) {
3438 		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3439 			      w, h, max_width, max_height);
3440 		return -EINVAL;
3441 	}
3442 
3443 	intel_add_fb_offsets(&x, &y, plane_state, 0);
3444 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3445 	alignment = intel_surf_alignment(fb, 0);
3446 
3447 	/*
3448 	 * AUX surface offset is specified as the distance from the
3449 	 * main surface offset, and it must be non-negative. Make
3450 	 * sure that is what we will get.
3451 	 */
3452 	if (offset > aux_offset)
3453 		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3454 							   offset, aux_offset & ~(alignment - 1));
3455 
	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceeds the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested.
	 */
3462 	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3463 		int cpp = fb->format->cpp[0];
3464 
3465 		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3466 			if (offset == 0) {
3467 				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3468 				return -EINVAL;
3469 			}
3470 
3471 			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3472 								   offset, offset - alignment);
3473 		}
3474 	}
3475 
3476 	/*
3477 	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3478 	 * they match with the main surface x/y offsets.
3479 	 */
3480 	if (is_ccs_modifier(fb->modifier)) {
3481 		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3482 			if (offset == 0)
3483 				break;
3484 
3485 			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3486 								   offset, offset - alignment);
3487 		}
3488 
3489 		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3490 			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3491 			return -EINVAL;
3492 		}
3493 	}
3494 
3495 	plane_state->color_plane[0].offset = offset;
3496 	plane_state->color_plane[0].x = x;
3497 	plane_state->color_plane[0].y = y;
3498 
3499 	/*
3500 	 * Put the final coordinates back so that the src
3501 	 * coordinate checks will see the right values.
3502 	 */
3503 	drm_rect_translate_to(&plane_state->base.src,
3504 			      x << 16, y << 16);
3505 
3506 	return 0;
3507 }
3508 
3509 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3510 {
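	/*
	 * src coordinates are in 16.16 fixed point; shifting by 17
	 * rather than 16 also halves them for the 2x2 subsampled
	 * CbCr plane.
	 */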
3511 	const struct drm_framebuffer *fb = plane_state->base.fb;
3512 	unsigned int rotation = plane_state->base.rotation;
3513 	int max_width = skl_max_plane_width(fb, 1, rotation);
3514 	int max_height = 4096;
3515 	int x = plane_state->base.src.x1 >> 17;
3516 	int y = plane_state->base.src.y1 >> 17;
3517 	int w = drm_rect_width(&plane_state->base.src) >> 17;
3518 	int h = drm_rect_height(&plane_state->base.src) >> 17;
3519 	u32 offset;
3520 
3521 	intel_add_fb_offsets(&x, &y, plane_state, 1);
3522 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3523 
3524 	/* FIXME not quite sure how/if these apply to the chroma plane */
3525 	if (w > max_width || h > max_height) {
3526 		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3527 			      w, h, max_width, max_height);
3528 		return -EINVAL;
3529 	}
3530 
3531 	plane_state->color_plane[1].offset = offset;
3532 	plane_state->color_plane[1].x = x;
3533 	plane_state->color_plane[1].y = y;
3534 
3535 	return 0;
3536 }
3537 
3538 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3539 {
3540 	const struct drm_framebuffer *fb = plane_state->base.fb;
3541 	int src_x = plane_state->base.src.x1 >> 16;
3542 	int src_y = plane_state->base.src.y1 >> 16;
3543 	int hsub = fb->format->hsub;
3544 	int vsub = fb->format->vsub;
3545 	int x = src_x / hsub;
3546 	int y = src_y / vsub;
3547 	u32 offset;
3548 
3549 	intel_add_fb_offsets(&x, &y, plane_state, 1);
3550 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3551 
3552 	plane_state->color_plane[1].offset = offset;
3553 	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3554 	plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3555 
3556 	return 0;
3557 }
3558 
3559 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3560 {
3561 	const struct drm_framebuffer *fb = plane_state->base.fb;
3562 	int ret;
3563 
3564 	ret = intel_plane_compute_gtt(plane_state);
3565 	if (ret)
3566 		return ret;
3567 
3568 	if (!plane_state->base.visible)
3569 		return 0;
3570 
3571 	/*
3572 	 * Handle the AUX surface first since
3573 	 * the main surface setup depends on it.
3574 	 */
3575 	if (drm_format_info_is_yuv_semiplanar(fb->format)) {
3576 		ret = skl_check_nv12_aux_surface(plane_state);
3577 		if (ret)
3578 			return ret;
3579 	} else if (is_ccs_modifier(fb->modifier)) {
3580 		ret = skl_check_ccs_aux_surface(plane_state);
3581 		if (ret)
3582 			return ret;
3583 	} else {
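		/*
		 * Dummy AUX offset: page aligned and high enough that
		 * the main surface offset adjustment in
		 * skl_check_main_surface() never kicks in.
		 */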
3584 		plane_state->color_plane[1].offset = ~0xfff;
3585 		plane_state->color_plane[1].x = 0;
3586 		plane_state->color_plane[1].y = 0;
3587 	}
3588 
3589 	ret = skl_check_main_surface(plane_state);
3590 	if (ret)
3591 		return ret;
3592 
3593 	return 0;
3594 }
3595 
3596 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
3597 			     const struct intel_plane_state *plane_state,
3598 			     unsigned int *num, unsigned int *den)
3599 {
3600 	const struct drm_framebuffer *fb = plane_state->base.fb;
3601 	unsigned int cpp = fb->format->cpp[0];
3602 
3603 	/*
3604 	 * g4x bspec says 64bpp pixel rate can't exceed 80%
3605 	 * of cdclk when the sprite plane is enabled on the
3606 	 * same pipe. ilk/snb bspec says 64bpp pixel rate is
3607 	 * never allowed to exceed 80% of cdclk. Let's just go
3608 	 * with the ilk/snb limit always.
3609 	 */
3610 	if (cpp == 8) {
3611 		*num = 10;
3612 		*den = 8;
3613 	} else {
3614 		*num = 1;
3615 		*den = 1;
3616 	}
3617 }
3618 
3619 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
3620 				const struct intel_plane_state *plane_state)
3621 {
3622 	unsigned int pixel_rate;
3623 	unsigned int num, den;
3624 
3625 	/*
3626 	 * Note that crtc_state->pixel_rate accounts for both
3627 	 * horizontal and vertical panel fitter downscaling factors.
3628 	 * Pre-HSW bspec tells us to only consider the horizontal
3629 	 * downscaling factor here. We ignore that and just consider
3630 	 * both for simplicity.
3631 	 */
3632 	pixel_rate = crtc_state->pixel_rate;
3633 
3634 	i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
3635 
3636 	/* two pixels per clock with double wide pipe */
3637 	if (crtc_state->double_wide)
3638 		den *= 2;
3639 
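	/* e.g. a 64bpp plane needs cdclk >= pixel_rate * 10 / 8 (125%) */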
3640 	return DIV_ROUND_UP(pixel_rate * num, den);
3641 }
3642 
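/*
 * Max stride in bytes for the i9xx primary planes: 32KiB on non-GMCH
 * platforms, with tighter limits for X-tiled buffers and for older
 * generations.
 */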
3643 unsigned int
3644 i9xx_plane_max_stride(struct intel_plane *plane,
3645 		      u32 pixel_format, u64 modifier,
3646 		      unsigned int rotation)
3647 {
3648 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3649 
3650 	if (!HAS_GMCH(dev_priv)) {
3651 		return 32*1024;
3652 	} else if (INTEL_GEN(dev_priv) >= 4) {
3653 		if (modifier == I915_FORMAT_MOD_X_TILED)
3654 			return 16*1024;
3655 		else
3656 			return 32*1024;
3657 	} else if (INTEL_GEN(dev_priv) >= 3) {
3658 		if (modifier == I915_FORMAT_MOD_X_TILED)
3659 			return 8*1024;
3660 		else
3661 			return 16*1024;
3662 	} else {
3663 		if (plane->i9xx_plane == PLANE_C)
3664 			return 4*1024;
3665 		else
3666 			return 8*1024;
3667 	}
3668 }
3669 
3670 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3671 {
3672 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3673 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3674 	u32 dspcntr = 0;
3675 
3676 	if (crtc_state->gamma_enable)
3677 		dspcntr |= DISPPLANE_GAMMA_ENABLE;
3678 
3679 	if (crtc_state->csc_enable)
3680 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3681 
3682 	if (INTEL_GEN(dev_priv) < 5)
3683 		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3684 
3685 	return dspcntr;
3686 }
3687 
3688 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3689 			  const struct intel_plane_state *plane_state)
3690 {
3691 	struct drm_i915_private *dev_priv =
3692 		to_i915(plane_state->base.plane->dev);
3693 	const struct drm_framebuffer *fb = plane_state->base.fb;
3694 	unsigned int rotation = plane_state->base.rotation;
3695 	u32 dspcntr;
3696 
3697 	dspcntr = DISPLAY_PLANE_ENABLE;
3698 
3699 	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3700 	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3701 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3702 
3703 	switch (fb->format->format) {
3704 	case DRM_FORMAT_C8:
3705 		dspcntr |= DISPPLANE_8BPP;
3706 		break;
3707 	case DRM_FORMAT_XRGB1555:
3708 		dspcntr |= DISPPLANE_BGRX555;
3709 		break;
3710 	case DRM_FORMAT_RGB565:
3711 		dspcntr |= DISPPLANE_BGRX565;
3712 		break;
3713 	case DRM_FORMAT_XRGB8888:
3714 		dspcntr |= DISPPLANE_BGRX888;
3715 		break;
3716 	case DRM_FORMAT_XBGR8888:
3717 		dspcntr |= DISPPLANE_RGBX888;
3718 		break;
3719 	case DRM_FORMAT_XRGB2101010:
3720 		dspcntr |= DISPPLANE_BGRX101010;
3721 		break;
3722 	case DRM_FORMAT_XBGR2101010:
3723 		dspcntr |= DISPPLANE_RGBX101010;
3724 		break;
3725 	case DRM_FORMAT_XBGR16161616F:
3726 		dspcntr |= DISPPLANE_RGBX161616;
3727 		break;
3728 	default:
3729 		MISSING_CASE(fb->format->format);
3730 		return 0;
3731 	}
3732 
3733 	if (INTEL_GEN(dev_priv) >= 4 &&
3734 	    fb->modifier == I915_FORMAT_MOD_X_TILED)
3735 		dspcntr |= DISPPLANE_TILED;
3736 
3737 	if (rotation & DRM_MODE_ROTATE_180)
3738 		dspcntr |= DISPPLANE_ROTATE_180;
3739 
3740 	if (rotation & DRM_MODE_REFLECT_X)
3741 		dspcntr |= DISPPLANE_MIRROR;
3742 
3743 	return dspcntr;
3744 }
3745 
3746 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3747 {
3748 	struct drm_i915_private *dev_priv =
3749 		to_i915(plane_state->base.plane->dev);
3750 	const struct drm_framebuffer *fb = plane_state->base.fb;
3751 	int src_x, src_y, src_w;
3752 	u32 offset;
3753 	int ret;
3754 
3755 	ret = intel_plane_compute_gtt(plane_state);
3756 	if (ret)
3757 		return ret;
3758 
3759 	if (!plane_state->base.visible)
3760 		return 0;
3761 
3762 	src_w = drm_rect_width(&plane_state->base.src) >> 16;
3763 	src_x = plane_state->base.src.x1 >> 16;
3764 	src_y = plane_state->base.src.y1 >> 16;
3765 
3766 	/* Undocumented hardware limit on i965/g4x/vlv/chv */
3767 	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
3768 		return -EINVAL;
3769 
3770 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3771 
3772 	if (INTEL_GEN(dev_priv) >= 4)
3773 		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3774 							    plane_state, 0);
3775 	else
3776 		offset = 0;
3777 
3778 	/*
3779 	 * Put the final coordinates back so that the src
3780 	 * coordinate checks will see the right values.
3781 	 */
3782 	drm_rect_translate_to(&plane_state->base.src,
3783 			      src_x << 16, src_y << 16);
3784 
3785 	/* HSW/BDW do this automagically in hardware */
3786 	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3787 		unsigned int rotation = plane_state->base.rotation;
3788 		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3789 		int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3790 
3791 		if (rotation & DRM_MODE_ROTATE_180) {
3792 			src_x += src_w - 1;
3793 			src_y += src_h - 1;
3794 		} else if (rotation & DRM_MODE_REFLECT_X) {
3795 			src_x += src_w - 1;
3796 		}
3797 	}
3798 
3799 	plane_state->color_plane[0].offset = offset;
3800 	plane_state->color_plane[0].x = src_x;
3801 	plane_state->color_plane[0].y = src_y;
3802 
3803 	return 0;
3804 }
3805 
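/*
 * Whether the plane can be positioned/sized within the pipe via
 * DSPPOS/DSPSIZE (or PRIMPOS/PRIMSIZE on CHV pipe B) rather than
 * always filling the whole pipe.
 */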
3806 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
3807 {
3808 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3809 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3810 
3811 	if (IS_CHERRYVIEW(dev_priv))
3812 		return i9xx_plane == PLANE_B;
3813 	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3814 		return false;
3815 	else if (IS_GEN(dev_priv, 4))
3816 		return i9xx_plane == PLANE_C;
3817 	else
3818 		return i9xx_plane == PLANE_B ||
3819 			i9xx_plane == PLANE_C;
3820 }
3821 
3822 static int
3823 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3824 		 struct intel_plane_state *plane_state)
3825 {
3826 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3827 	int ret;
3828 
3829 	ret = chv_plane_check_rotation(plane_state);
3830 	if (ret)
3831 		return ret;
3832 
3833 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3834 						  &crtc_state->base,
3835 						  DRM_PLANE_HELPER_NO_SCALING,
3836 						  DRM_PLANE_HELPER_NO_SCALING,
3837 						  i9xx_plane_has_windowing(plane),
3838 						  true);
3839 	if (ret)
3840 		return ret;
3841 
3842 	ret = i9xx_check_plane_surface(plane_state);
3843 	if (ret)
3844 		return ret;
3845 
3846 	if (!plane_state->base.visible)
3847 		return 0;
3848 
3849 	ret = intel_plane_check_src_coordinates(plane_state);
3850 	if (ret)
3851 		return ret;
3852 
3853 	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3854 
3855 	return 0;
3856 }
3857 
3858 static void i9xx_update_plane(struct intel_plane *plane,
3859 			      const struct intel_crtc_state *crtc_state,
3860 			      const struct intel_plane_state *plane_state)
3861 {
3862 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3863 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3864 	u32 linear_offset;
3865 	int x = plane_state->color_plane[0].x;
3866 	int y = plane_state->color_plane[0].y;
3867 	int crtc_x = plane_state->base.dst.x1;
3868 	int crtc_y = plane_state->base.dst.y1;
3869 	int crtc_w = drm_rect_width(&plane_state->base.dst);
3870 	int crtc_h = drm_rect_height(&plane_state->base.dst);
3871 	unsigned long irqflags;
3872 	u32 dspaddr_offset;
3873 	u32 dspcntr;
3874 
3875 	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3876 
3877 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3878 
3879 	if (INTEL_GEN(dev_priv) >= 4)
3880 		dspaddr_offset = plane_state->color_plane[0].offset;
3881 	else
3882 		dspaddr_offset = linear_offset;
3883 
3884 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3885 
3886 	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3887 
3888 	if (INTEL_GEN(dev_priv) < 4) {
3889 		/*
3890 		 * PLANE_A doesn't actually have a full window
3891 		 * generator but let's assume we still need to
3892 		 * program whatever is there.
3893 		 */
3894 		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
3895 		I915_WRITE_FW(DSPSIZE(i9xx_plane),
3896 			      ((crtc_h - 1) << 16) | (crtc_w - 1));
3897 	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3898 		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
3899 		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3900 			      ((crtc_h - 1) << 16) | (crtc_w - 1));
3901 		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3902 	}
3903 
3904 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3905 		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3906 	} else if (INTEL_GEN(dev_priv) >= 4) {
3907 		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3908 		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3909 	}
3910 
3911 	/*
3912 	 * The control register self-arms if the plane was previously
3913 	 * disabled. Try to make the plane enable atomic by writing
3914 	 * the control register just before the surface register.
3915 	 */
3916 	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3917 	if (INTEL_GEN(dev_priv) >= 4)
3918 		I915_WRITE_FW(DSPSURF(i9xx_plane),
3919 			      intel_plane_ggtt_offset(plane_state) +
3920 			      dspaddr_offset);
3921 	else
3922 		I915_WRITE_FW(DSPADDR(i9xx_plane),
3923 			      intel_plane_ggtt_offset(plane_state) +
3924 			      dspaddr_offset);
3925 
3926 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3927 }
3928 
3929 static void i9xx_disable_plane(struct intel_plane *plane,
3930 			       const struct intel_crtc_state *crtc_state)
3931 {
3932 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3933 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3934 	unsigned long irqflags;
3935 	u32 dspcntr;
3936 
3937 	/*
3938 	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3939 	 * enable on ilk+ affect the pipe bottom color as
3940 	 * well, so we must configure them even if the plane
3941 	 * is disabled.
3942 	 *
3943 	 * On pre-g4x there is no way to gamma correct the
3944 	 * pipe bottom color but we'll keep on doing this
3945 	 * anyway so that the crtc state readout works correctly.
3946 	 */
3947 	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3948 
3949 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3950 
3951 	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3952 	if (INTEL_GEN(dev_priv) >= 4)
3953 		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3954 	else
3955 		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3956 
3957 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3958 }
3959 
3960 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3961 				    enum pipe *pipe)
3962 {
3963 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3964 	enum intel_display_power_domain power_domain;
3965 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3966 	intel_wakeref_t wakeref;
3967 	bool ret;
3968 	u32 val;
3969 
3970 	/*
3971 	 * Not 100% correct for planes that can move between pipes,
3972 	 * but that's only the case for gen2-4 which don't have any
3973 	 * display power wells.
3974 	 */
3975 	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3976 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3977 	if (!wakeref)
3978 		return false;
3979 
3980 	val = I915_READ(DSPCNTR(i9xx_plane));
3981 
3982 	ret = val & DISPLAY_PLANE_ENABLE;
3983 
3984 	if (INTEL_GEN(dev_priv) >= 5)
3985 		*pipe = plane->pipe;
3986 	else
3987 		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3988 			DISPPLANE_SEL_PIPE_SHIFT;
3989 
3990 	intel_display_power_put(dev_priv, power_domain, wakeref);
3991 
3992 	return ret;
3993 }
3994 
3995 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3996 {
3997 	struct drm_device *dev = intel_crtc->base.dev;
3998 	struct drm_i915_private *dev_priv = to_i915(dev);
3999 
4000 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4001 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4002 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4003 }
4004 
/*
 * This function detaches (i.e. unbinds) unused scalers in hardware.
 */
4008 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4009 {
4010 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4011 	const struct intel_crtc_scaler_state *scaler_state =
4012 		&crtc_state->scaler_state;
4013 	int i;
4014 
4015 	/* loop through and disable scalers that aren't in use */
4016 	for (i = 0; i < intel_crtc->num_scalers; i++) {
4017 		if (!scaler_state->scalers[i].in_use)
4018 			skl_detach_scaler(intel_crtc, i);
4019 	}
4020 }
4021 
4022 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
4023 					  int color_plane, unsigned int rotation)
4024 {
4025 	/*
	 * The stride is expressed either in units of 64 byte chunks for
	 * linear buffers or in a number of tiles for tiled buffers.
4028 	 */
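	/*
	 * e.g. an 8192 byte stride is programmed as 8192 / 64 = 128 in
	 * the linear case, but as 8192 / 512 = 16 tiles when X-tiled
	 * (X tiles being 512 bytes wide).
	 */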
4029 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
4030 		return 64;
4031 	else if (drm_rotation_90_or_270(rotation))
4032 		return intel_tile_height(fb, color_plane);
4033 	else
4034 		return intel_tile_width_bytes(fb, color_plane);
4035 }
4036 
4037 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4038 		     int color_plane)
4039 {
4040 	const struct drm_framebuffer *fb = plane_state->base.fb;
4041 	unsigned int rotation = plane_state->base.rotation;
4042 	u32 stride = plane_state->color_plane[color_plane].stride;
4043 
4044 	if (color_plane >= fb->format->num_planes)
4045 		return 0;
4046 
4047 	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4048 }
4049 
4050 static u32 skl_plane_ctl_format(u32 pixel_format)
4051 {
4052 	switch (pixel_format) {
4053 	case DRM_FORMAT_C8:
4054 		return PLANE_CTL_FORMAT_INDEXED;
4055 	case DRM_FORMAT_RGB565:
4056 		return PLANE_CTL_FORMAT_RGB_565;
4057 	case DRM_FORMAT_XBGR8888:
4058 	case DRM_FORMAT_ABGR8888:
4059 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
4060 	case DRM_FORMAT_XRGB8888:
4061 	case DRM_FORMAT_ARGB8888:
4062 		return PLANE_CTL_FORMAT_XRGB_8888;
4063 	case DRM_FORMAT_XBGR2101010:
4064 		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
4065 	case DRM_FORMAT_XRGB2101010:
4066 		return PLANE_CTL_FORMAT_XRGB_2101010;
4067 	case DRM_FORMAT_XBGR16161616F:
4068 	case DRM_FORMAT_ABGR16161616F:
4069 		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
4070 	case DRM_FORMAT_XRGB16161616F:
4071 	case DRM_FORMAT_ARGB16161616F:
4072 		return PLANE_CTL_FORMAT_XRGB_16161616F;
4073 	case DRM_FORMAT_YUYV:
4074 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
4075 	case DRM_FORMAT_YVYU:
4076 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
4077 	case DRM_FORMAT_UYVY:
4078 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
4079 	case DRM_FORMAT_VYUY:
4080 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
4081 	case DRM_FORMAT_NV12:
4082 		return PLANE_CTL_FORMAT_NV12;
4083 	case DRM_FORMAT_P010:
4084 		return PLANE_CTL_FORMAT_P010;
4085 	case DRM_FORMAT_P012:
4086 		return PLANE_CTL_FORMAT_P012;
4087 	case DRM_FORMAT_P016:
4088 		return PLANE_CTL_FORMAT_P016;
4089 	case DRM_FORMAT_Y210:
4090 		return PLANE_CTL_FORMAT_Y210;
4091 	case DRM_FORMAT_Y212:
4092 		return PLANE_CTL_FORMAT_Y212;
4093 	case DRM_FORMAT_Y216:
4094 		return PLANE_CTL_FORMAT_Y216;
4095 	case DRM_FORMAT_XVYU2101010:
4096 		return PLANE_CTL_FORMAT_Y410;
4097 	case DRM_FORMAT_XVYU12_16161616:
4098 		return PLANE_CTL_FORMAT_Y412;
4099 	case DRM_FORMAT_XVYU16161616:
4100 		return PLANE_CTL_FORMAT_Y416;
4101 	default:
4102 		MISSING_CASE(pixel_format);
4103 	}
4104 
4105 	return 0;
4106 }
4107 
4108 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4109 {
4110 	if (!plane_state->base.fb->format->has_alpha)
4111 		return PLANE_CTL_ALPHA_DISABLE;
4112 
4113 	switch (plane_state->base.pixel_blend_mode) {
4114 	case DRM_MODE_BLEND_PIXEL_NONE:
4115 		return PLANE_CTL_ALPHA_DISABLE;
4116 	case DRM_MODE_BLEND_PREMULTI:
4117 		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4118 	case DRM_MODE_BLEND_COVERAGE:
4119 		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4120 	default:
4121 		MISSING_CASE(plane_state->base.pixel_blend_mode);
4122 		return PLANE_CTL_ALPHA_DISABLE;
4123 	}
4124 }
4125 
4126 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4127 {
4128 	if (!plane_state->base.fb->format->has_alpha)
4129 		return PLANE_COLOR_ALPHA_DISABLE;
4130 
4131 	switch (plane_state->base.pixel_blend_mode) {
4132 	case DRM_MODE_BLEND_PIXEL_NONE:
4133 		return PLANE_COLOR_ALPHA_DISABLE;
4134 	case DRM_MODE_BLEND_PREMULTI:
4135 		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4136 	case DRM_MODE_BLEND_COVERAGE:
4137 		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4138 	default:
4139 		MISSING_CASE(plane_state->base.pixel_blend_mode);
4140 		return PLANE_COLOR_ALPHA_DISABLE;
4141 	}
4142 }
4143 
4144 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4145 {
4146 	switch (fb_modifier) {
4147 	case DRM_FORMAT_MOD_LINEAR:
4148 		break;
4149 	case I915_FORMAT_MOD_X_TILED:
4150 		return PLANE_CTL_TILED_X;
4151 	case I915_FORMAT_MOD_Y_TILED:
4152 		return PLANE_CTL_TILED_Y;
4153 	case I915_FORMAT_MOD_Y_TILED_CCS:
4154 		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4155 	case I915_FORMAT_MOD_Yf_TILED:
4156 		return PLANE_CTL_TILED_YF;
4157 	case I915_FORMAT_MOD_Yf_TILED_CCS:
4158 		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4159 	default:
4160 		MISSING_CASE(fb_modifier);
4161 	}
4162 
4163 	return 0;
4164 }
4165 
4166 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4167 {
4168 	switch (rotate) {
4169 	case DRM_MODE_ROTATE_0:
4170 		break;
	/*
	 * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with
	 * Xrandr, while i915 HW rotation is clockwise; that's why the
	 * values are swapped here.
	 */
4175 	case DRM_MODE_ROTATE_90:
4176 		return PLANE_CTL_ROTATE_270;
4177 	case DRM_MODE_ROTATE_180:
4178 		return PLANE_CTL_ROTATE_180;
4179 	case DRM_MODE_ROTATE_270:
4180 		return PLANE_CTL_ROTATE_90;
4181 	default:
4182 		MISSING_CASE(rotate);
4183 	}
4184 
4185 	return 0;
4186 }
4187 
4188 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4189 {
4190 	switch (reflect) {
4191 	case 0:
4192 		break;
4193 	case DRM_MODE_REFLECT_X:
4194 		return PLANE_CTL_FLIP_HORIZONTAL;
4195 	case DRM_MODE_REFLECT_Y:
4196 	default:
4197 		MISSING_CASE(reflect);
4198 	}
4199 
4200 	return 0;
4201 }
4202 
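/*
 * Pipe-level enables carried in PLANE_CTL on pre-GLK hardware; on
 * GLK+ the pipe gamma/CSC bits live elsewhere (see
 * glk_plane_color_ctl_crtc()), hence the early return.
 */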
4203 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4204 {
4205 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4206 	u32 plane_ctl = 0;
4207 
4208 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4209 		return plane_ctl;
4210 
4211 	if (crtc_state->gamma_enable)
4212 		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4213 
4214 	if (crtc_state->csc_enable)
4215 		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4216 
4217 	return plane_ctl;
4218 }
4219 
4220 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4221 		  const struct intel_plane_state *plane_state)
4222 {
4223 	struct drm_i915_private *dev_priv =
4224 		to_i915(plane_state->base.plane->dev);
4225 	const struct drm_framebuffer *fb = plane_state->base.fb;
4226 	unsigned int rotation = plane_state->base.rotation;
4227 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4228 	u32 plane_ctl;
4229 
4230 	plane_ctl = PLANE_CTL_ENABLE;
4231 
4232 	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4233 		plane_ctl |= skl_plane_ctl_alpha(plane_state);
4234 		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4235 
4236 		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4237 			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4238 
4239 		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4240 			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4241 	}
4242 
4243 	plane_ctl |= skl_plane_ctl_format(fb->format->format);
4244 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4245 	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4246 
4247 	if (INTEL_GEN(dev_priv) >= 10)
4248 		plane_ctl |= cnl_plane_ctl_flip(rotation &
4249 						DRM_MODE_REFLECT_MASK);
4250 
4251 	if (key->flags & I915_SET_COLORKEY_DESTINATION)
4252 		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4253 	else if (key->flags & I915_SET_COLORKEY_SOURCE)
4254 		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4255 
4256 	return plane_ctl;
4257 }
4258 
4259 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4260 {
4261 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4262 	u32 plane_color_ctl = 0;
4263 
4264 	if (INTEL_GEN(dev_priv) >= 11)
4265 		return plane_color_ctl;
4266 
4267 	if (crtc_state->gamma_enable)
4268 		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4269 
4270 	if (crtc_state->csc_enable)
4271 		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4272 
4273 	return plane_color_ctl;
4274 }
4275 
4276 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4277 			const struct intel_plane_state *plane_state)
4278 {
4279 	struct drm_i915_private *dev_priv =
4280 		to_i915(plane_state->base.plane->dev);
4281 	const struct drm_framebuffer *fb = plane_state->base.fb;
4282 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4283 	u32 plane_color_ctl = 0;
4284 
4285 	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4286 	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4287 
4288 	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4289 		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4290 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4291 		else
4292 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4293 
4294 		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4295 			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4296 	} else if (fb->format->is_yuv) {
4297 		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4298 	}
4299 
4300 	return plane_color_ctl;
4301 }
4302 
4303 static int
4304 __intel_display_resume(struct drm_device *dev,
4305 		       struct drm_atomic_state *state,
4306 		       struct drm_modeset_acquire_ctx *ctx)
4307 {
4308 	struct drm_crtc_state *crtc_state;
4309 	struct drm_crtc *crtc;
4310 	int i, ret;
4311 
4312 	intel_modeset_setup_hw_state(dev, ctx);
4313 	intel_vga_redisable(to_i915(dev));
4314 
4315 	if (!state)
4316 		return 0;
4317 
4318 	/*
	 * We've duplicated the state; pointers to the old state are invalid.
4320 	 *
4321 	 * Don't attempt to use the old state until we commit the duplicated state.
4322 	 */
4323 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4324 		/*
4325 		 * Force recalculation even if we restore
4326 		 * current state. With fast modeset this may not result
4327 		 * in a modeset when the state is compatible.
4328 		 */
4329 		crtc_state->mode_changed = true;
4330 	}
4331 
4332 	/* ignore any reset values/BIOS leftovers in the WM registers */
4333 	if (!HAS_GMCH(to_i915(dev)))
4334 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
4335 
4336 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4337 
4338 	WARN_ON(ret == -EDEADLK);
4339 	return ret;
4340 }
4341 
4342 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4343 {
4344 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4345 		intel_has_gpu_reset(&dev_priv->gt));
4346 }
4347 
4348 void intel_prepare_reset(struct drm_i915_private *dev_priv)
4349 {
4350 	struct drm_device *dev = &dev_priv->drm;
4351 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4352 	struct drm_atomic_state *state;
4353 	int ret;
4354 
4355 	/* reset doesn't touch the display */
4356 	if (!i915_modparams.force_reset_modeset_test &&
4357 	    !gpu_reset_clobbers_display(dev_priv))
4358 		return;
4359 
4360 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
4361 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
4362 	smp_mb__after_atomic();
4363 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
4364 
4365 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4366 		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4367 		intel_gt_set_wedged(&dev_priv->gt);
4368 	}
4369 
4370 	/*
4371 	 * Need mode_config.mutex so that we don't
4372 	 * trample ongoing ->detect() and whatnot.
4373 	 */
4374 	mutex_lock(&dev->mode_config.mutex);
4375 	drm_modeset_acquire_init(ctx, 0);
4376 	while (1) {
4377 		ret = drm_modeset_lock_all_ctx(dev, ctx);
4378 		if (ret != -EDEADLK)
4379 			break;
4380 
4381 		drm_modeset_backoff(ctx);
4382 	}
4383 	/*
4384 	 * Disabling the crtcs gracefully seems nicer. Also the
4385 	 * g33 docs say we should at least disable all the planes.
4386 	 */
4387 	state = drm_atomic_helper_duplicate_state(dev, ctx);
4388 	if (IS_ERR(state)) {
4389 		ret = PTR_ERR(state);
4390 		DRM_ERROR("Duplicating state failed with %i\n", ret);
4391 		return;
4392 	}
4393 
4394 	ret = drm_atomic_helper_disable_all(dev, ctx);
4395 	if (ret) {
		DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
4397 		drm_atomic_state_put(state);
4398 		return;
4399 	}
4400 
4401 	dev_priv->modeset_restore_state = state;
4402 	state->acquire_ctx = ctx;
4403 }
4404 
4405 void intel_finish_reset(struct drm_i915_private *dev_priv)
4406 {
4407 	struct drm_device *dev = &dev_priv->drm;
4408 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4409 	struct drm_atomic_state *state;
4410 	int ret;
4411 
4412 	/* reset doesn't touch the display */
4413 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
4414 		return;
4415 
4416 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
4417 	if (!state)
4418 		goto unlock;
4419 
4420 	/* reset doesn't touch the display */
4421 	if (!gpu_reset_clobbers_display(dev_priv)) {
4422 		/* for testing only restore the display */
4423 		ret = __intel_display_resume(dev, state, ctx);
4424 		if (ret)
4425 			DRM_ERROR("Restoring old state failed with %i\n", ret);
4426 	} else {
4427 		/*
4428 		 * The display has been reset as well,
4429 		 * so need a full re-initialization.
4430 		 */
4431 		intel_pps_unlock_regs_wa(dev_priv);
4432 		intel_modeset_init_hw(dev_priv);
4433 		intel_init_clock_gating(dev_priv);
4434 
4435 		spin_lock_irq(&dev_priv->irq_lock);
4436 		if (dev_priv->display.hpd_irq_setup)
4437 			dev_priv->display.hpd_irq_setup(dev_priv);
4438 		spin_unlock_irq(&dev_priv->irq_lock);
4439 
4440 		ret = __intel_display_resume(dev, state, ctx);
4441 		if (ret)
4442 			DRM_ERROR("Restoring old state failed with %i\n", ret);
4443 
4444 		intel_hpd_init(dev_priv);
4445 	}
4446 
4447 	drm_atomic_state_put(state);
4448 unlock:
4449 	drm_modeset_drop_locks(ctx);
4450 	drm_modeset_acquire_fini(ctx);
4451 	mutex_unlock(&dev->mode_config.mutex);
4452 
4453 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
4454 }
4455 
4456 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4457 {
4458 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4459 	enum pipe pipe = crtc->pipe;
4460 	u32 tmp;
4461 
4462 	tmp = I915_READ(PIPE_CHICKEN(pipe));
4463 
4464 	/*
4465 	 * Display WA #1153: icl
4466 	 * enable hardware to bypass the alpha math
4467 	 * and rounding for per-pixel values 00 and 0xff
4468 	 */
4469 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4470 	/*
4471 	 * Display WA # 1605353570: icl
4472 	 * Set the pixel rounding bit to 1 for allowing
4473 	 * passthrough of Frame buffer pixels unmodified
4474 	 * across pipe
4475 	 */
4476 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4477 	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4478 }
4479 
4480 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
4481 {
4482 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4483 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4484 	u32 trans_ddi_func_ctl2_val;
4485 	u8 master_select;
4486 
4487 	/*
4488 	 * Configure the master select and enable Transcoder Port Sync for
	 * the slave CRTC's transcoder.
4490 	 */
4491 	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
4492 		return;
4493 
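	/* The EDP transcoder uses master select 0; transcoders A.. map to 1.. */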
4494 	if (crtc_state->master_transcoder == TRANSCODER_EDP)
4495 		master_select = 0;
4496 	else
4497 		master_select = crtc_state->master_transcoder + 1;
4498 
	/* Set the master select bits for Transcoder Port Sync */
4500 	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
4501 				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
4502 		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
4503 	/* Enable Transcoder Port Sync */
4504 	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
4505 
4506 	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
4507 		   trans_ddi_func_ctl2_val);
4508 }
4509 
4510 static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
4511 {
4512 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4513 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4514 	i915_reg_t reg;
4515 	u32 trans_ddi_func_ctl2_val;
4516 
4517 	if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
4518 		return;
4519 
4520 	DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
4521 		      transcoder_name(old_crtc_state->cpu_transcoder));
4522 
4523 	reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
	/*
	 * Clear only the enable and master select bits; writing the raw
	 * complement of the mask would set every other bit in the register.
	 */
	trans_ddi_func_ctl2_val = I915_READ(reg);
	trans_ddi_func_ctl2_val &= ~(PORT_SYNC_MODE_ENABLE |
				     PORT_SYNC_MODE_MASTER_SELECT_MASK);
	I915_WRITE(reg, trans_ddi_func_ctl2_val);
4527 }
4528 
4529 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4530 {
4531 	struct drm_device *dev = crtc->base.dev;
4532 	struct drm_i915_private *dev_priv = to_i915(dev);
4533 	enum pipe pipe = crtc->pipe;
4534 	i915_reg_t reg;
4535 	u32 temp;
4536 
4537 	/* enable normal train */
4538 	reg = FDI_TX_CTL(pipe);
4539 	temp = I915_READ(reg);
4540 	if (IS_IVYBRIDGE(dev_priv)) {
4541 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4542 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4543 	} else {
4544 		temp &= ~FDI_LINK_TRAIN_NONE;
4545 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4546 	}
4547 	I915_WRITE(reg, temp);
4548 
4549 	reg = FDI_RX_CTL(pipe);
4550 	temp = I915_READ(reg);
4551 	if (HAS_PCH_CPT(dev_priv)) {
4552 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4553 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4554 	} else {
4555 		temp &= ~FDI_LINK_TRAIN_NONE;
4556 		temp |= FDI_LINK_TRAIN_NONE;
4557 	}
4558 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4559 
4560 	/* wait one idle pattern time */
4561 	POSTING_READ(reg);
4562 	udelay(1000);
4563 
4564 	/* IVB wants error correction enabled */
4565 	if (IS_IVYBRIDGE(dev_priv))
4566 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4567 			   FDI_FE_ERRC_ENABLE);
4568 }
4569 
4570 /* The FDI link training functions for ILK/Ibexpeak. */
4571 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4572 				    const struct intel_crtc_state *crtc_state)
4573 {
4574 	struct drm_device *dev = crtc->base.dev;
4575 	struct drm_i915_private *dev_priv = to_i915(dev);
4576 	enum pipe pipe = crtc->pipe;
4577 	i915_reg_t reg;
4578 	u32 temp, tries;
4579 
4580 	/* FDI needs bits from pipe first */
4581 	assert_pipe_enabled(dev_priv, pipe);
4582 
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
4585 	reg = FDI_RX_IMR(pipe);
4586 	temp = I915_READ(reg);
4587 	temp &= ~FDI_RX_SYMBOL_LOCK;
4588 	temp &= ~FDI_RX_BIT_LOCK;
4589 	I915_WRITE(reg, temp);
4590 	I915_READ(reg);
4591 	udelay(150);
4592 
4593 	/* enable CPU FDI TX and PCH FDI RX */
4594 	reg = FDI_TX_CTL(pipe);
4595 	temp = I915_READ(reg);
4596 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
4597 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4598 	temp &= ~FDI_LINK_TRAIN_NONE;
4599 	temp |= FDI_LINK_TRAIN_PATTERN_1;
4600 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
4601 
4602 	reg = FDI_RX_CTL(pipe);
4603 	temp = I915_READ(reg);
4604 	temp &= ~FDI_LINK_TRAIN_NONE;
4605 	temp |= FDI_LINK_TRAIN_PATTERN_1;
4606 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
4607 
4608 	POSTING_READ(reg);
4609 	udelay(150);
4610 
	/* Ironlake workaround, enable clock pointer after FDI enable */
4612 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4613 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4614 		   FDI_RX_PHASE_SYNC_POINTER_EN);
4615 
4616 	reg = FDI_RX_IIR(pipe);
4617 	for (tries = 0; tries < 5; tries++) {
4618 		temp = I915_READ(reg);
4619 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4620 
4621 		if ((temp & FDI_RX_BIT_LOCK)) {
4622 			DRM_DEBUG_KMS("FDI train 1 done.\n");
4623 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4624 			break;
4625 		}
4626 	}
4627 	if (tries == 5)
4628 		DRM_ERROR("FDI train 1 fail!\n");
4629 
4630 	/* Train 2 */
4631 	reg = FDI_TX_CTL(pipe);
4632 	temp = I915_READ(reg);
4633 	temp &= ~FDI_LINK_TRAIN_NONE;
4634 	temp |= FDI_LINK_TRAIN_PATTERN_2;
4635 	I915_WRITE(reg, temp);
4636 
4637 	reg = FDI_RX_CTL(pipe);
4638 	temp = I915_READ(reg);
4639 	temp &= ~FDI_LINK_TRAIN_NONE;
4640 	temp |= FDI_LINK_TRAIN_PATTERN_2;
4641 	I915_WRITE(reg, temp);
4642 
4643 	POSTING_READ(reg);
4644 	udelay(150);
4645 
4646 	reg = FDI_RX_IIR(pipe);
4647 	for (tries = 0; tries < 5; tries++) {
4648 		temp = I915_READ(reg);
4649 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4650 
4651 		if (temp & FDI_RX_SYMBOL_LOCK) {
4652 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4653 			DRM_DEBUG_KMS("FDI train 2 done.\n");
4654 			break;
4655 		}
4656 	}
4657 	if (tries == 5)
4658 		DRM_ERROR("FDI train 2 fail!\n");
4659 
	DRM_DEBUG_KMS("FDI train done.\n");
}
4663 
4664 static const int snb_b_fdi_train_param[] = {
4665 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4666 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4667 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4668 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4669 };
4670 
4671 /* The FDI link training functions for SNB/Cougarpoint. */
4672 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4673 				const struct intel_crtc_state *crtc_state)
4674 {
4675 	struct drm_device *dev = crtc->base.dev;
4676 	struct drm_i915_private *dev_priv = to_i915(dev);
4677 	enum pipe pipe = crtc->pipe;
4678 	i915_reg_t reg;
4679 	u32 temp, i, retry;
4680 
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
4683 	reg = FDI_RX_IMR(pipe);
4684 	temp = I915_READ(reg);
4685 	temp &= ~FDI_RX_SYMBOL_LOCK;
4686 	temp &= ~FDI_RX_BIT_LOCK;
4687 	I915_WRITE(reg, temp);
4688 
4689 	POSTING_READ(reg);
4690 	udelay(150);
4691 
4692 	/* enable CPU FDI TX and PCH FDI RX */
4693 	reg = FDI_TX_CTL(pipe);
4694 	temp = I915_READ(reg);
4695 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
4696 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4697 	temp &= ~FDI_LINK_TRAIN_NONE;
4698 	temp |= FDI_LINK_TRAIN_PATTERN_1;
4699 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4700 	/* SNB-B */
4701 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4702 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
4703 
4704 	I915_WRITE(FDI_RX_MISC(pipe),
4705 		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4706 
4707 	reg = FDI_RX_CTL(pipe);
4708 	temp = I915_READ(reg);
4709 	if (HAS_PCH_CPT(dev_priv)) {
4710 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4711 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4712 	} else {
4713 		temp &= ~FDI_LINK_TRAIN_NONE;
4714 		temp |= FDI_LINK_TRAIN_PATTERN_1;
4715 	}
4716 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
4717 
4718 	POSTING_READ(reg);
4719 	udelay(150);
4720 
4721 	for (i = 0; i < 4; i++) {
4722 		reg = FDI_TX_CTL(pipe);
4723 		temp = I915_READ(reg);
4724 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4725 		temp |= snb_b_fdi_train_param[i];
4726 		I915_WRITE(reg, temp);
4727 
4728 		POSTING_READ(reg);
4729 		udelay(500);
4730 
4731 		for (retry = 0; retry < 5; retry++) {
4732 			reg = FDI_RX_IIR(pipe);
4733 			temp = I915_READ(reg);
4734 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4735 			if (temp & FDI_RX_BIT_LOCK) {
4736 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4737 				DRM_DEBUG_KMS("FDI train 1 done.\n");
4738 				break;
4739 			}
4740 			udelay(50);
4741 		}
4742 		if (retry < 5)
4743 			break;
4744 	}
4745 	if (i == 4)
4746 		DRM_ERROR("FDI train 1 fail!\n");
4747 
4748 	/* Train 2 */
4749 	reg = FDI_TX_CTL(pipe);
4750 	temp = I915_READ(reg);
4751 	temp &= ~FDI_LINK_TRAIN_NONE;
4752 	temp |= FDI_LINK_TRAIN_PATTERN_2;
4753 	if (IS_GEN(dev_priv, 6)) {
4754 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4755 		/* SNB-B */
4756 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4757 	}
4758 	I915_WRITE(reg, temp);
4759 
4760 	reg = FDI_RX_CTL(pipe);
4761 	temp = I915_READ(reg);
4762 	if (HAS_PCH_CPT(dev_priv)) {
4763 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4764 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4765 	} else {
4766 		temp &= ~FDI_LINK_TRAIN_NONE;
4767 		temp |= FDI_LINK_TRAIN_PATTERN_2;
4768 	}
4769 	I915_WRITE(reg, temp);
4770 
4771 	POSTING_READ(reg);
4772 	udelay(150);
4773 
4774 	for (i = 0; i < 4; i++) {
4775 		reg = FDI_TX_CTL(pipe);
4776 		temp = I915_READ(reg);
4777 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4778 		temp |= snb_b_fdi_train_param[i];
4779 		I915_WRITE(reg, temp);
4780 
4781 		POSTING_READ(reg);
4782 		udelay(500);
4783 
4784 		for (retry = 0; retry < 5; retry++) {
4785 			reg = FDI_RX_IIR(pipe);
4786 			temp = I915_READ(reg);
4787 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4788 			if (temp & FDI_RX_SYMBOL_LOCK) {
4789 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4790 				DRM_DEBUG_KMS("FDI train 2 done.\n");
4791 				break;
4792 			}
4793 			udelay(50);
4794 		}
4795 		if (retry < 5)
4796 			break;
4797 	}
4798 	if (i == 4)
4799 		DRM_ERROR("FDI train 2 fail!\n");
4800 
4801 	DRM_DEBUG_KMS("FDI train done.\n");
4802 }
4803 
4804 /* Manual link training for Ivy Bridge A0 parts */
4805 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4806 				      const struct intel_crtc_state *crtc_state)
4807 {
4808 	struct drm_device *dev = crtc->base.dev;
4809 	struct drm_i915_private *dev_priv = to_i915(dev);
4810 	enum pipe pipe = crtc->pipe;
4811 	i915_reg_t reg;
4812 	u32 temp, i, j;
4813 
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
	   for train result */
4816 	reg = FDI_RX_IMR(pipe);
4817 	temp = I915_READ(reg);
4818 	temp &= ~FDI_RX_SYMBOL_LOCK;
4819 	temp &= ~FDI_RX_BIT_LOCK;
4820 	I915_WRITE(reg, temp);
4821 
4822 	POSTING_READ(reg);
4823 	udelay(150);
4824 
4825 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4826 		      I915_READ(FDI_RX_IIR(pipe)));
4827 
4828 	/* Try each vswing and preemphasis setting twice before moving on */
4829 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4830 		/* disable first in case we need to retry */
4831 		reg = FDI_TX_CTL(pipe);
4832 		temp = I915_READ(reg);
4833 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4834 		temp &= ~FDI_TX_ENABLE;
4835 		I915_WRITE(reg, temp);
4836 
4837 		reg = FDI_RX_CTL(pipe);
4838 		temp = I915_READ(reg);
4839 		temp &= ~FDI_LINK_TRAIN_AUTO;
4840 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4841 		temp &= ~FDI_RX_ENABLE;
4842 		I915_WRITE(reg, temp);
4843 
4844 		/* enable CPU FDI TX and PCH FDI RX */
4845 		reg = FDI_TX_CTL(pipe);
4846 		temp = I915_READ(reg);
4847 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
4848 		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4849 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4850 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4851 		temp |= snb_b_fdi_train_param[j/2];
4852 		temp |= FDI_COMPOSITE_SYNC;
4853 		I915_WRITE(reg, temp | FDI_TX_ENABLE);
4854 
4855 		I915_WRITE(FDI_RX_MISC(pipe),
4856 			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4857 
4858 		reg = FDI_RX_CTL(pipe);
4859 		temp = I915_READ(reg);
4860 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4861 		temp |= FDI_COMPOSITE_SYNC;
4862 		I915_WRITE(reg, temp | FDI_RX_ENABLE);
4863 
4864 		POSTING_READ(reg);
4865 		udelay(1); /* should be 0.5us */
4866 
4867 		for (i = 0; i < 4; i++) {
4868 			reg = FDI_RX_IIR(pipe);
4869 			temp = I915_READ(reg);
4870 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4871 
4872 			if (temp & FDI_RX_BIT_LOCK ||
4873 			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4874 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4875 				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4876 					      i);
4877 				break;
4878 			}
4879 			udelay(1); /* should be 0.5us */
4880 		}
4881 		if (i == 4) {
4882 			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4883 			continue;
4884 		}
4885 
4886 		/* Train 2 */
4887 		reg = FDI_TX_CTL(pipe);
4888 		temp = I915_READ(reg);
4889 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4890 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4891 		I915_WRITE(reg, temp);
4892 
4893 		reg = FDI_RX_CTL(pipe);
4894 		temp = I915_READ(reg);
4895 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4896 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4897 		I915_WRITE(reg, temp);
4898 
4899 		POSTING_READ(reg);
4900 		udelay(2); /* should be 1.5us */
4901 
4902 		for (i = 0; i < 4; i++) {
4903 			reg = FDI_RX_IIR(pipe);
4904 			temp = I915_READ(reg);
4905 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4906 
4907 			if (temp & FDI_RX_SYMBOL_LOCK ||
4908 			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4909 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4910 				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4911 					      i);
4912 				goto train_done;
4913 			}
4914 			udelay(2); /* should be 1.5us */
4915 		}
4916 		if (i == 4)
4917 			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4918 	}
4919 
4920 train_done:
4921 	DRM_DEBUG_KMS("FDI train done.\n");
4922 }
4923 
4924 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4925 {
4926 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4927 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4928 	enum pipe pipe = intel_crtc->pipe;
4929 	i915_reg_t reg;
4930 	u32 temp;
4931 
4932 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4933 	reg = FDI_RX_CTL(pipe);
4934 	temp = I915_READ(reg);
4935 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4936 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4937 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4938 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4939 
4940 	POSTING_READ(reg);
4941 	udelay(200);
4942 
4943 	/* Switch from Rawclk to PCDclk */
4944 	temp = I915_READ(reg);
4945 	I915_WRITE(reg, temp | FDI_PCDCLK);
4946 
4947 	POSTING_READ(reg);
4948 	udelay(200);
4949 
4950 	/* Enable CPU FDI TX PLL, always on for Ironlake */
4951 	reg = FDI_TX_CTL(pipe);
4952 	temp = I915_READ(reg);
4953 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4954 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4955 
4956 		POSTING_READ(reg);
4957 		udelay(100);
4958 	}
4959 }
4960 
4961 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4962 {
4963 	struct drm_device *dev = intel_crtc->base.dev;
4964 	struct drm_i915_private *dev_priv = to_i915(dev);
4965 	enum pipe pipe = intel_crtc->pipe;
4966 	i915_reg_t reg;
4967 	u32 temp;
4968 
4969 	/* Switch from PCDclk to Rawclk */
4970 	reg = FDI_RX_CTL(pipe);
4971 	temp = I915_READ(reg);
4972 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
4973 
4974 	/* Disable CPU FDI TX PLL */
4975 	reg = FDI_TX_CTL(pipe);
4976 	temp = I915_READ(reg);
4977 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4978 
4979 	POSTING_READ(reg);
4980 	udelay(100);
4981 
4982 	reg = FDI_RX_CTL(pipe);
4983 	temp = I915_READ(reg);
4984 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4985 
4986 	/* Wait for the clocks to turn off. */
4987 	POSTING_READ(reg);
4988 	udelay(100);
4989 }
4990 
4991 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4992 {
4993 	struct drm_device *dev = crtc->dev;
4994 	struct drm_i915_private *dev_priv = to_i915(dev);
4995 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4996 	enum pipe pipe = intel_crtc->pipe;
4997 	i915_reg_t reg;
4998 	u32 temp;
4999 
5000 	/* disable CPU FDI tx and PCH FDI rx */
5001 	reg = FDI_TX_CTL(pipe);
5002 	temp = I915_READ(reg);
5003 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
5004 	POSTING_READ(reg);
5005 
5006 	reg = FDI_RX_CTL(pipe);
5007 	temp = I915_READ(reg);
5008 	temp &= ~(0x7 << 16);
5009 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5010 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
5011 
5012 	POSTING_READ(reg);
5013 	udelay(100);
5014 
5015 	/* Ironlake workaround, disable clock pointer after downing FDI */
5016 	if (HAS_PCH_IBX(dev_priv))
5017 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
5018 
5019 	/* still set train pattern 1 */
5020 	reg = FDI_TX_CTL(pipe);
5021 	temp = I915_READ(reg);
5022 	temp &= ~FDI_LINK_TRAIN_NONE;
5023 	temp |= FDI_LINK_TRAIN_PATTERN_1;
5024 	I915_WRITE(reg, temp);
5025 
5026 	reg = FDI_RX_CTL(pipe);
5027 	temp = I915_READ(reg);
5028 	if (HAS_PCH_CPT(dev_priv)) {
5029 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5030 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5031 	} else {
5032 		temp &= ~FDI_LINK_TRAIN_NONE;
5033 		temp |= FDI_LINK_TRAIN_PATTERN_1;
5034 	}
5035 	/* BPC in FDI rx is consistent with that in PIPECONF */
5036 	temp &= ~(0x07 << 16);
5037 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5038 	I915_WRITE(reg, temp);
5039 
5040 	POSTING_READ(reg);
5041 	udelay(100);
5042 }
5043 
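/*
 * Returns true if some CRTC still has an atomic commit whose cleanup
 * hasn't completed, after giving it one vblank on that CRTC to finish.
 */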
5044 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5045 {
5046 	struct drm_crtc *crtc;
5047 	bool cleanup_done;
5048 
5049 	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
5052 		commit = list_first_entry_or_null(&crtc->commit_list,
5053 						  struct drm_crtc_commit, commit_entry);
5054 		cleanup_done = commit ?
5055 			try_wait_for_completion(&commit->cleanup_done) : true;
5056 		spin_unlock(&crtc->commit_lock);
5057 
5058 		if (cleanup_done)
5059 			continue;
5060 
5061 		drm_crtc_wait_one_vblank(crtc);
5062 
5063 		return true;
5064 	}
5065 
5066 	return false;
5067 }
5068 
5069 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
5070 {
5071 	u32 temp;
5072 
5073 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
5074 
5075 	mutex_lock(&dev_priv->sb_lock);
5076 
5077 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5078 	temp |= SBI_SSCCTL_DISABLE;
5079 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5080 
5081 	mutex_unlock(&dev_priv->sb_lock);
5082 }
5083 
5084 /* Program iCLKIP clock to the desired frequency */
5085 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
5086 {
5087 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5088 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5089 	int clock = crtc_state->base.adjusted_mode.crtc_clock;
5090 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
5091 	u32 temp;
5092 
5093 	lpt_disable_iclkip(dev_priv);
5094 
5095 	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
5097 	 * divisors, it is necessary to divide one by another, so we
5098 	 * convert the virtual clock precision to KHz here for higher
5099 	 * precision.
5100 	 */
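	/*
	 * Worked example (not from bspec): with a 108000 KHz clock and
	 * auxdiv = 0, desired_divisor = 172800000 / 108000 = 1600,
	 * giving divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.
	 */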
5101 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
5102 		u32 iclk_virtual_root_freq = 172800 * 1000;
5103 		u32 iclk_pi_range = 64;
5104 		u32 desired_divisor;
5105 
5106 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5107 						    clock << auxdiv);
5108 		divsel = (desired_divisor / iclk_pi_range) - 2;
5109 		phaseinc = desired_divisor % iclk_pi_range;
5110 
5111 		/*
5112 		 * Near 20MHz is a corner case which is
5113 		 * out of range for the 7-bit divisor
5114 		 */
5115 		if (divsel <= 0x7f)
5116 			break;
5117 	}
5118 
5119 	/* This should not happen with any sane values */
5120 	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
5121 		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5122 	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
5123 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5124 
5125 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5126 			clock,
5127 			auxdiv,
5128 			divsel,
5129 			phasedir,
5130 			phaseinc);
5131 
5132 	mutex_lock(&dev_priv->sb_lock);
5133 
5134 	/* Program SSCDIVINTPHASE6 */
5135 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5136 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5137 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5138 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5139 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5140 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5141 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5142 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5143 
5144 	/* Program SSCAUXDIV */
5145 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5146 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5147 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5148 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5149 
5150 	/* Enable modulator and associated divider */
5151 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5152 	temp &= ~SBI_SSCCTL_DISABLE;
5153 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5154 
5155 	mutex_unlock(&dev_priv->sb_lock);
5156 
5157 	/* Wait for initialization time */
5158 	udelay(24);
5159 
5160 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5161 }
5162 
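/*
 * Read back the current iCLKIP frequency in KHz; the inverse of
 * lpt_program_iclkip(): desired_divisor = (divsel + 2) * 64 + phaseinc,
 * clock = 172.8 MHz / (desired_divisor << auxdiv).
 */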
5163 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5164 {
5165 	u32 divsel, phaseinc, auxdiv;
5166 	u32 iclk_virtual_root_freq = 172800 * 1000;
5167 	u32 iclk_pi_range = 64;
5168 	u32 desired_divisor;
5169 	u32 temp;
5170 
5171 	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5172 		return 0;
5173 
5174 	mutex_lock(&dev_priv->sb_lock);
5175 
5176 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5177 	if (temp & SBI_SSCCTL_DISABLE) {
5178 		mutex_unlock(&dev_priv->sb_lock);
5179 		return 0;
5180 	}
5181 
5182 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5183 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5184 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5185 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5186 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5187 
5188 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5189 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5190 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5191 
5192 	mutex_unlock(&dev_priv->sb_lock);
5193 
	/*
	 * Invert the decomposition done in lpt_program_iclkip():
	 * reconstruct the divisor, then recover the clock in KHz.
	 */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5195 
5196 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5197 				 desired_divisor << auxdiv);
5198 }
5199 
5200 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5201 						enum pipe pch_transcoder)
5202 {
5203 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5204 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5205 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5206 
5207 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5208 		   I915_READ(HTOTAL(cpu_transcoder)));
5209 	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5210 		   I915_READ(HBLANK(cpu_transcoder)));
5211 	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5212 		   I915_READ(HSYNC(cpu_transcoder)));
5213 
5214 	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5215 		   I915_READ(VTOTAL(cpu_transcoder)));
5216 	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5217 		   I915_READ(VBLANK(cpu_transcoder)));
5218 	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5219 		   I915_READ(VSYNC(cpu_transcoder)));
5220 	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5221 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
5222 }
5223 
5224 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5225 {
5226 	u32 temp;
5227 
5228 	temp = I915_READ(SOUTH_CHICKEN1);
5229 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5230 		return;
5231 
5232 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5233 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5234 
5235 	temp &= ~FDI_BC_BIFURCATION_SELECT;
5236 	if (enable)
5237 		temp |= FDI_BC_BIFURCATION_SELECT;
5238 
5239 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5240 	I915_WRITE(SOUTH_CHICKEN1, temp);
5241 	POSTING_READ(SOUTH_CHICKEN1);
5242 }
5243 
5244 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5245 {
5246 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5247 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5248 
5249 	switch (crtc->pipe) {
5250 	case PIPE_A:
5251 		break;
5252 	case PIPE_B:
5253 		if (crtc_state->fdi_lanes > 2)
5254 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
5255 		else
5256 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
5257 
5258 		break;
5259 	case PIPE_C:
5260 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
5261 
5262 		break;
5263 	default:
5264 		BUG();
5265 	}
5266 }
5267 
5268 /*
5269  * Finds the encoder associated with the given CRTC. This can only be
5270  * used when we know that the CRTC isn't feeding multiple encoders!
5271  */
5272 static struct intel_encoder *
5273 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5274 			   const struct intel_crtc_state *crtc_state)
5275 {
5276 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5277 	const struct drm_connector_state *connector_state;
5278 	const struct drm_connector *connector;
5279 	struct intel_encoder *encoder = NULL;
5280 	int num_encoders = 0;
5281 	int i;
5282 
5283 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5284 		if (connector_state->crtc != &crtc->base)
5285 			continue;
5286 
5287 		encoder = to_intel_encoder(connector_state->best_encoder);
5288 		num_encoders++;
5289 	}
5290 
5291 	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5292 	     num_encoders, pipe_name(crtc->pipe));
5293 
5294 	return encoder;
5295 }
5296 
5297 /*
5298  * Enable PCH resources required for PCH ports:
5299  *   - PCH PLLs
5300  *   - FDI training & RX/TX
5301  *   - update transcoder timings
5302  *   - DP transcoding bits
5303  *   - transcoder
5304  */
5305 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5306 				const struct intel_crtc_state *crtc_state)
5307 {
5308 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5309 	struct drm_device *dev = crtc->base.dev;
5310 	struct drm_i915_private *dev_priv = to_i915(dev);
5311 	enum pipe pipe = crtc->pipe;
5312 	u32 temp;
5313 
5314 	assert_pch_transcoder_disabled(dev_priv, pipe);
5315 
5316 	if (IS_IVYBRIDGE(dev_priv))
5317 		ivybridge_update_fdi_bc_bifurcation(crtc_state);
5318 
5319 	/* Write the TU size bits before fdi link training, so that error
5320 	 * detection works. */
5321 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
5322 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5323 
5324 	/* For PCH output, training FDI link */
5325 	dev_priv->display.fdi_link_train(crtc, crtc_state);
5326 
	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
5329 	if (HAS_PCH_CPT(dev_priv)) {
5330 		u32 sel;
5331 
5332 		temp = I915_READ(PCH_DPLL_SEL);
5333 		temp |= TRANS_DPLL_ENABLE(pipe);
5334 		sel = TRANS_DPLLB_SEL(pipe);
5335 		if (crtc_state->shared_dpll ==
5336 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5337 			temp |= sel;
5338 		else
5339 			temp &= ~sel;
5340 		I915_WRITE(PCH_DPLL_SEL, temp);
5341 	}
5342 
	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
5346 	 *
5347 	 * Note that enable_shared_dpll tries to do the right thing, but
5348 	 * get_shared_dpll unconditionally resets the pll - we need that to have
5349 	 * the right LVDS enable sequence. */
5350 	intel_enable_shared_dpll(crtc_state);
5351 
5352 	/* set transcoder timing, panel must allow it */
5353 	assert_panel_unlocked(dev_priv, pipe);
5354 	ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5355 
5356 	intel_fdi_normal_train(crtc);
5357 
5358 	/* For PCH DP, enable TRANS_DP_CTL */
5359 	if (HAS_PCH_CPT(dev_priv) &&
5360 	    intel_crtc_has_dp_encoder(crtc_state)) {
5361 		const struct drm_display_mode *adjusted_mode =
5362 			&crtc_state->base.adjusted_mode;
5363 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5364 		i915_reg_t reg = TRANS_DP_CTL(pipe);
5365 		enum port port;
5366 
5367 		temp = I915_READ(reg);
5368 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
5369 			  TRANS_DP_SYNC_MASK |
5370 			  TRANS_DP_BPC_MASK);
5371 		temp |= TRANS_DP_OUTPUT_ENABLE;
5372 		temp |= bpc << 9; /* same format but at 11:9 */
5373 
5374 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5375 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5376 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5377 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5378 
5379 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5380 		WARN_ON(port < PORT_B || port > PORT_D);
5381 		temp |= TRANS_DP_PORT_SEL(port);
5382 
5383 		I915_WRITE(reg, temp);
5384 	}
5385 
5386 	ironlake_enable_pch_transcoder(crtc_state);
5387 }
5388 
5389 static void lpt_pch_enable(const struct intel_atomic_state *state,
5390 			   const struct intel_crtc_state *crtc_state)
5391 {
5392 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5393 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5394 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5395 
5396 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5397 
5398 	lpt_program_iclkip(crtc_state);
5399 
5400 	/* Set transcoder timing. */
5401 	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5402 
5403 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5404 }
5405 
5406 static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
5407 {
5408 	struct drm_i915_private *dev_priv = to_i915(dev);
5409 	i915_reg_t dslreg = PIPEDSL(pipe);
5410 	u32 temp;
5411 
5412 	temp = I915_READ(dslreg);
5413 	udelay(500);
5414 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once: the scanline counter must advance if the pipe is running */
		if (wait_for(I915_READ(dslreg) != temp, 5))
5416 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5417 	}
5418 }
5419 
5420 /*
5421  * The hardware phase 0.0 refers to the center of the pixel.
5422  * We want to start from the top/left edge which is phase
5423  * -0.5. That matches how the hardware calculates the scaling
5424  * factors (from top-left of the first pixel to bottom-right
5425  * of the last pixel, as opposed to the pixel centers).
5426  *
5427  * For 4:2:0 subsampled chroma planes we obviously have to
5428  * adjust that so that the chroma sample position lands in
5429  * the right spot.
5430  *
5431  * Note that for packed YCbCr 4:2:2 formats there is no way to
5432  * control chroma siting. The hardware simply replicates the
5433  * chroma samples for both of the luma samples, and thus we don't
5434  * actually get the expected MPEG2 chroma siting convention :(
5435  * The same behaviour is observed on pre-SKL platforms as well.
5436  *
5437  * Theory behind the formula (note that we ignore sub-pixel
5438  * source coordinates):
5439  * s = source sample position
5440  * d = destination sample position
5441  *
5442  * Downscaling 4:1:
5443  * -0.5
5444  * | 0.0
5445  * | |     1.5 (initial phase)
5446  * | |     |
5447  * v v     v
5448  * | s | s | s | s |
5449  * |       d       |
5450  *
5451  * Upscaling 1:4:
5452  * -0.5
5453  * | -0.375 (initial phase)
5454  * | |     0.0
5455  * | |     |
5456  * v v     v
5457  * |       s       |
5458  * | d | d | d | d |
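 *
 * Worked example (illustrative), in the .16 fixed point used by
 * skl_scaler_calc_phase() below:
 *   4:1 downscale, luma (sub = 1, scale = 4.0 = 0x40000):
 *     phase = -0x8000 + 0x40000 / 2 = 0x18000 = +1.5
 *   1:4 upscale, luma (sub = 1, scale = 0.25 = 0x4000):
 *     phase = -0x8000 + 0x4000 / 2 = -0x6000 = -0.375
 * matching the initial phases marked in the diagrams above.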
5459  */
5460 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5461 {
5462 	int phase = -0x8000;
5463 	u16 trip = 0;
5464 
5465 	if (chroma_cosited)
5466 		phase += (sub - 1) * 0x8000 / sub;
5467 
5468 	phase += scale / (2 * sub);
5469 
	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
5475 	WARN_ON(phase < -0x8000 || phase > 0x18000);
5476 
5477 	if (phase < 0)
5478 		phase = 0x10000 + phase;
5479 	else
5480 		trip = PS_PHASE_TRIP;
5481 
5482 	return ((phase >> 2) & PS_PHASE_MASK) | trip;
5483 }
5484 
5485 #define SKL_MIN_SRC_W 8
5486 #define SKL_MAX_SRC_W 4096
5487 #define SKL_MIN_SRC_H 8
5488 #define SKL_MAX_SRC_H 4096
5489 #define SKL_MIN_DST_W 8
5490 #define SKL_MAX_DST_W 4096
5491 #define SKL_MIN_DST_H 8
5492 #define SKL_MAX_DST_H 4096
5493 #define ICL_MAX_SRC_W 5120
5494 #define ICL_MAX_SRC_H 4096
5495 #define ICL_MAX_DST_W 5120
5496 #define ICL_MAX_DST_H 4096
5497 #define SKL_MIN_YUV_420_SRC_W 16
5498 #define SKL_MIN_YUV_420_SRC_H 16
5499 
5500 static int
5501 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5502 		  unsigned int scaler_user, int *scaler_id,
5503 		  int src_w, int src_h, int dst_w, int dst_h,
5504 		  const struct drm_format_info *format, bool need_scaler)
5505 {
5506 	struct intel_crtc_scaler_state *scaler_state =
5507 		&crtc_state->scaler_state;
5508 	struct intel_crtc *intel_crtc =
5509 		to_intel_crtc(crtc_state->base.crtc);
5510 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5511 	const struct drm_display_mode *adjusted_mode =
5512 		&crtc_state->base.adjusted_mode;
5513 
5514 	/*
5515 	 * Src coordinates are already rotated by 270 degrees for
5516 	 * the 90/270 degree plane rotation cases (to match the
5517 	 * GTT mapping), hence no need to account for rotation here.
5518 	 */
5519 	if (src_w != dst_w || src_h != dst_h)
5520 		need_scaler = true;
5521 
5522 	/*
	 * Scaling/fitting is not supported in IF-ID mode on GEN9+.
5524 	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5525 	 * Once NV12 is enabled, handle it here while allocating scaler
5526 	 * for NV12.
5527 	 */
5528 	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5529 	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5530 		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5531 		return -EINVAL;
5532 	}
5533 
	/*
	 * If the plane is being disabled, the scaler is no longer required, or
	 * we were asked to force detach:
	 *  - free the scaler bound to this plane/crtc
	 *  - to do this, update scaler_state->scaler_users
	 *
	 * Here the scaler state in crtc_state is set free so that the
	 * scaler can be assigned to another user. The actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
5544 	if (force_detach || !need_scaler) {
5545 		if (*scaler_id >= 0) {
5546 			scaler_state->scaler_users &= ~(1 << scaler_user);
5547 			scaler_state->scalers[*scaler_id].in_use = 0;
5548 
5549 			DRM_DEBUG_KMS("scaler_user index %u.%u: "
5550 				"Staged freeing scaler id %d scaler_users = 0x%x\n",
5551 				intel_crtc->pipe, scaler_user, *scaler_id,
5552 				scaler_state->scaler_users);
5553 			*scaler_id = -1;
5554 		}
5555 		return 0;
5556 	}
5557 
5558 	if (format && drm_format_info_is_yuv_semiplanar(format) &&
5559 	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5560 		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5561 		return -EINVAL;
5562 	}
5563 
5564 	/* range checks */
5565 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5566 	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5567 	    (INTEL_GEN(dev_priv) >= 11 &&
5568 	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5569 	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5570 	    (INTEL_GEN(dev_priv) < 11 &&
5571 	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5572 	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
5573 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5574 			"size is out of scaler range\n",
5575 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5576 		return -EINVAL;
5577 	}
5578 
5579 	/* mark this plane as a scaler user in crtc_state */
5580 	scaler_state->scaler_users |= (1 << scaler_user);
5581 	DRM_DEBUG_KMS("scaler_user index %u.%u: "
5582 		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5583 		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5584 		scaler_state->scaler_users);
5585 
5586 	return 0;
5587 }
5588 
5589 /**
5590  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5591  *
 * @state: crtc state to stage the scaler update for
5593  *
5594  * Return
5595  *     0 - scaler_usage updated successfully
5596  *    error - requested scaling cannot be supported or other error condition
5597  */
5598 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5599 {
5600 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5601 	bool need_scaler = false;
5602 
5603 	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5604 		need_scaler = true;
5605 
5606 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5607 				 &state->scaler_state.scaler_id,
5608 				 state->pipe_src_w, state->pipe_src_h,
5609 				 adjusted_mode->crtc_hdisplay,
5610 				 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5611 }
5612 
5613 /**
5614  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state containing the scaler state to update
5616  * @plane_state: atomic plane state to update
5617  *
5618  * Return
5619  *     0 - scaler_usage updated successfully
5620  *    error - requested scaling cannot be supported or other error condition
5621  */
5622 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5623 				   struct intel_plane_state *plane_state)
5624 {
5625 	struct intel_plane *intel_plane =
5626 		to_intel_plane(plane_state->base.plane);
5627 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5628 	struct drm_framebuffer *fb = plane_state->base.fb;
5629 	int ret;
5630 	bool force_detach = !fb || !plane_state->base.visible;
5631 	bool need_scaler = false;
5632 
5633 	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
5634 	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5635 	    fb && drm_format_info_is_yuv_semiplanar(fb->format))
5636 		need_scaler = true;
5637 
5638 	ret = skl_update_scaler(crtc_state, force_detach,
5639 				drm_plane_index(&intel_plane->base),
5640 				&plane_state->scaler_id,
5641 				drm_rect_width(&plane_state->base.src) >> 16,
5642 				drm_rect_height(&plane_state->base.src) >> 16,
5643 				drm_rect_width(&plane_state->base.dst),
5644 				drm_rect_height(&plane_state->base.dst),
5645 				fb ? fb->format : NULL, need_scaler);
5646 
5647 	if (ret || plane_state->scaler_id < 0)
5648 		return ret;
5649 
5650 	/* check colorkey */
5651 	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n",
5653 			      intel_plane->base.base.id,
5654 			      intel_plane->base.name);
5655 		return -EINVAL;
5656 	}
5657 
5658 	/* Check src format */
5659 	switch (fb->format->format) {
5660 	case DRM_FORMAT_RGB565:
5661 	case DRM_FORMAT_XBGR8888:
5662 	case DRM_FORMAT_XRGB8888:
5663 	case DRM_FORMAT_ABGR8888:
5664 	case DRM_FORMAT_ARGB8888:
5665 	case DRM_FORMAT_XRGB2101010:
5666 	case DRM_FORMAT_XBGR2101010:
5667 	case DRM_FORMAT_YUYV:
5668 	case DRM_FORMAT_YVYU:
5669 	case DRM_FORMAT_UYVY:
5670 	case DRM_FORMAT_VYUY:
5671 	case DRM_FORMAT_NV12:
5672 	case DRM_FORMAT_P010:
5673 	case DRM_FORMAT_P012:
5674 	case DRM_FORMAT_P016:
5675 	case DRM_FORMAT_Y210:
5676 	case DRM_FORMAT_Y212:
5677 	case DRM_FORMAT_Y216:
5678 	case DRM_FORMAT_XVYU2101010:
5679 	case DRM_FORMAT_XVYU12_16161616:
5680 	case DRM_FORMAT_XVYU16161616:
5681 		break;
5682 	case DRM_FORMAT_XBGR16161616F:
5683 	case DRM_FORMAT_ABGR16161616F:
5684 	case DRM_FORMAT_XRGB16161616F:
5685 	case DRM_FORMAT_ARGB16161616F:
5686 		if (INTEL_GEN(dev_priv) >= 11)
5687 			break;
5688 		/* fall through */
5689 	default:
5690 		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5691 			      intel_plane->base.base.id, intel_plane->base.name,
5692 			      fb->base.id, fb->format->format);
5693 		return -EINVAL;
5694 	}
5695 
5696 	return 0;
5697 }
5698 
5699 static void skylake_scaler_disable(struct intel_crtc *crtc)
5700 {
5701 	int i;
5702 
5703 	for (i = 0; i < crtc->num_scalers; i++)
5704 		skl_detach_scaler(crtc, i);
5705 }
5706 
5707 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5708 {
5709 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5710 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5711 	enum pipe pipe = crtc->pipe;
5712 	const struct intel_crtc_scaler_state *scaler_state =
5713 		&crtc_state->scaler_state;
5714 
5715 	if (crtc_state->pch_pfit.enabled) {
5716 		u16 uv_rgb_hphase, uv_rgb_vphase;
5717 		int pfit_w, pfit_h, hscale, vscale;
5718 		int id;
5719 
5720 		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5721 			return;
5722 
5723 		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5724 		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5725 
		/* pipe source -> pfit window scale factors, in .16 fixed point */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5728 
5729 		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5730 		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5731 
5732 		id = scaler_state->scaler_id;
5733 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5734 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5735 		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5736 			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5737 		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5738 			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5739 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5740 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5741 	}
5742 }
5743 
5744 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5745 {
5746 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5747 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5748 	enum pipe pipe = crtc->pipe;
5749 
5750 	if (crtc_state->pch_pfit.enabled) {
5751 		/* Force use of hard-coded filter coefficients
5752 		 * as some pre-programmed values are broken,
5753 		 * e.g. x201.
5754 		 */
5755 		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5756 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5757 						 PF_PIPE_SEL_IVB(pipe));
5758 		else
5759 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5760 		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5761 		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5762 	}
5763 }
5764 
5765 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5766 {
5767 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5768 	struct drm_device *dev = crtc->base.dev;
5769 	struct drm_i915_private *dev_priv = to_i915(dev);
5770 
5771 	if (!crtc_state->ips_enabled)
5772 		return;
5773 
5774 	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
5776 	 * This function is called from post_plane_update, which is run after
5777 	 * a vblank wait.
5778 	 */
5779 	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5780 
5781 	if (IS_BROADWELL(dev_priv)) {
5782 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5783 						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "it's not safe to expect any particular
5785 		 * value in IPS_CTL bit 31 after enabling IPS through the
5786 		 * mailbox." Moreover, the mailbox may return a bogus state,
5787 		 * so we need to just enable it and continue on.
5788 		 */
5789 	} else {
5790 		I915_WRITE(IPS_CTL, IPS_ENABLE);
5791 		/* The bit only becomes 1 in the next vblank, so this wait here
5792 		 * is essentially intel_wait_for_vblank. If we don't have this
5793 		 * and don't wait for vblanks until the end of crtc_enable, then
5794 		 * the HW state readout code will complain that the expected
5795 		 * IPS_CTL value is not the one we read. */
5796 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
5797 			DRM_ERROR("Timed out waiting for IPS enable\n");
5798 	}
5799 }
5800 
5801 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5802 {
5803 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5804 	struct drm_device *dev = crtc->base.dev;
5805 	struct drm_i915_private *dev_priv = to_i915(dev);
5806 
5807 	if (!crtc_state->ips_enabled)
5808 		return;
5809 
5810 	if (IS_BROADWELL(dev_priv)) {
5811 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5812 		/*
5813 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
5814 		 * 42ms timeout value leads to occasional timeouts so use 100ms
5815 		 * instead.
5816 		 */
5817 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
5818 			DRM_ERROR("Timed out waiting for IPS disable\n");
5819 	} else {
5820 		I915_WRITE(IPS_CTL, 0);
5821 		POSTING_READ(IPS_CTL);
5822 	}
5823 
5824 	/* We need to wait for a vblank before we can disable the plane. */
5825 	intel_wait_for_vblank(dev_priv, crtc->pipe);
5826 }
5827 
5828 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5829 {
5830 	if (intel_crtc->overlay)
5831 		(void) intel_overlay_switch_off(intel_crtc->overlay);
5832 
5833 	/* Let userspace switch the overlay on again. In most cases userspace
5834 	 * has to recompute where to put it anyway.
5835 	 */
5836 }
5837 
5838 /**
5839  * intel_post_enable_primary - Perform operations after enabling primary plane
5840  * @crtc: the CRTC whose primary plane was just enabled
5841  * @new_crtc_state: the enabling state
5842  *
 * Performs operations that must be done after the primary plane is enabled,
 * such as re-enabling FIFO underrun reporting.  Note that this may be
5845  * called due to an explicit primary plane update, or due to an implicit
5846  * re-enable that is caused when a sprite plane is updated to no longer
5847  * completely hide the primary plane.
5848  */
5849 static void
5850 intel_post_enable_primary(struct drm_crtc *crtc,
5851 			  const struct intel_crtc_state *new_crtc_state)
5852 {
5853 	struct drm_device *dev = crtc->dev;
5854 	struct drm_i915_private *dev_priv = to_i915(dev);
5855 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5856 	enum pipe pipe = intel_crtc->pipe;
5857 
5858 	/*
5859 	 * Gen2 reports pipe underruns whenever all planes are disabled.
5860 	 * So don't enable underrun reporting before at least some planes
5861 	 * are enabled.
5862 	 * FIXME: Need to fix the logic to work when we turn off all planes
5863 	 * but leave the pipe running.
5864 	 */
5865 	if (IS_GEN(dev_priv, 2))
5866 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5867 
5868 	/* Underruns don't always raise interrupts, so check manually. */
5869 	intel_check_cpu_fifo_underruns(dev_priv);
5870 	intel_check_pch_fifo_underruns(dev_priv);
5871 }
5872 
5873 /* FIXME get rid of this and use pre_plane_update */
5874 static void
5875 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5876 {
5877 	struct drm_device *dev = crtc->dev;
5878 	struct drm_i915_private *dev_priv = to_i915(dev);
5879 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5880 	enum pipe pipe = intel_crtc->pipe;
5881 
5882 	/*
5883 	 * Gen2 reports pipe underruns whenever all planes are disabled.
5884 	 * So disable underrun reporting before all the planes get disabled.
5885 	 */
5886 	if (IS_GEN(dev_priv, 2))
5887 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5888 
5889 	hsw_disable_ips(to_intel_crtc_state(crtc->state));
5890 
5891 	/*
5892 	 * Vblank time updates from the shadow to live plane control register
5893 	 * are blocked if the memory self-refresh mode is active at that
5894 	 * moment. So to make sure the plane gets truly disabled, disable
5895 	 * first the self-refresh mode. The self-refresh enable bit in turn
5896 	 * will be checked/applied by the HW only at the next frame start
5897 	 * event which is after the vblank start event, so we need to have a
5898 	 * wait-for-vblank between disabling the plane and the pipe.
5899 	 */
5900 	if (HAS_GMCH(dev_priv) &&
5901 	    intel_set_memory_cxsr(dev_priv, false))
5902 		intel_wait_for_vblank(dev_priv, pipe);
5903 }
5904 
5905 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5906 				       const struct intel_crtc_state *new_crtc_state)
5907 {
5908 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5909 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5910 
5911 	if (!old_crtc_state->ips_enabled)
5912 		return false;
5913 
5914 	if (needs_modeset(new_crtc_state))
5915 		return true;
5916 
5917 	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
5919 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5920 	 *
5921 	 * Disable IPS before we program the LUT.
5922 	 */
5923 	if (IS_HASWELL(dev_priv) &&
5924 	    (new_crtc_state->base.color_mgmt_changed ||
5925 	     new_crtc_state->update_pipe) &&
5926 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5927 		return true;
5928 
5929 	return !new_crtc_state->ips_enabled;
5930 }
5931 
5932 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5933 				       const struct intel_crtc_state *new_crtc_state)
5934 {
5935 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5936 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5937 
5938 	if (!new_crtc_state->ips_enabled)
5939 		return false;
5940 
5941 	if (needs_modeset(new_crtc_state))
5942 		return true;
5943 
5944 	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
5946 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5947 	 *
5948 	 * Re-enable IPS after the LUT has been programmed.
5949 	 */
5950 	if (IS_HASWELL(dev_priv) &&
5951 	    (new_crtc_state->base.color_mgmt_changed ||
5952 	     new_crtc_state->update_pipe) &&
5953 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5954 		return true;
5955 
5956 	/*
5957 	 * We can't read out IPS on broadwell, assume the worst and
5958 	 * forcibly enable IPS on the first fastset.
5959 	 */
5960 	if (new_crtc_state->update_pipe &&
5961 	    old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5962 		return true;
5963 
5964 	return !old_crtc_state->ips_enabled;
5965 }
5966 
5967 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5968 			  const struct intel_crtc_state *crtc_state)
5969 {
5970 	if (!crtc_state->nv12_planes)
5971 		return false;
5972 
5973 	/* WA Display #0827: Gen9:all */
5974 	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5975 		return true;
5976 
5977 	return false;
5978 }
5979 
5980 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5981 			       const struct intel_crtc_state *crtc_state)
5982 {
5983 	/* Wa_2006604312:icl */
5984 	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5985 		return true;
5986 
5987 	return false;
5988 }
5989 
5990 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5991 {
5992 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5993 	struct drm_device *dev = crtc->base.dev;
5994 	struct drm_i915_private *dev_priv = to_i915(dev);
5995 	struct drm_atomic_state *state = old_crtc_state->base.state;
5996 	struct intel_crtc_state *pipe_config =
5997 		intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
5998 						crtc);
5999 	struct drm_plane *primary = crtc->base.primary;
6000 	struct drm_plane_state *old_primary_state =
6001 		drm_atomic_get_old_plane_state(state, primary);
6002 
6003 	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
6004 
6005 	if (pipe_config->update_wm_post && pipe_config->base.active)
6006 		intel_update_watermarks(crtc);
6007 
6008 	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
6009 		hsw_enable_ips(pipe_config);
6010 
6011 	if (old_primary_state) {
6012 		struct drm_plane_state *new_primary_state =
6013 			drm_atomic_get_new_plane_state(state, primary);
6014 
6015 		intel_fbc_post_update(crtc);
6016 
6017 		if (new_primary_state->visible &&
6018 		    (needs_modeset(pipe_config) ||
6019 		     !old_primary_state->visible))
6020 			intel_post_enable_primary(&crtc->base, pipe_config);
6021 	}
6022 
6023 	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
6024 	    !needs_nv12_wa(dev_priv, pipe_config))
6025 		skl_wa_827(dev_priv, crtc->pipe, false);
6026 
6027 	if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
6028 	    !needs_scalerclk_wa(dev_priv, pipe_config))
6029 		icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
6030 }
6031 
6032 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
6033 				   struct intel_crtc_state *pipe_config)
6034 {
6035 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6036 	struct drm_device *dev = crtc->base.dev;
6037 	struct drm_i915_private *dev_priv = to_i915(dev);
6038 	struct drm_atomic_state *state = old_crtc_state->base.state;
6039 	struct drm_plane *primary = crtc->base.primary;
6040 	struct drm_plane_state *old_primary_state =
6041 		drm_atomic_get_old_plane_state(state, primary);
6042 	bool modeset = needs_modeset(pipe_config);
6043 	struct intel_atomic_state *intel_state =
6044 		to_intel_atomic_state(state);
6045 
6046 	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
6047 		hsw_disable_ips(old_crtc_state);
6048 
6049 	if (old_primary_state) {
6050 		struct intel_plane_state *new_primary_state =
6051 			intel_atomic_get_new_plane_state(intel_state,
6052 							 to_intel_plane(primary));
6053 
6054 		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
6055 		/*
6056 		 * Gen2 reports pipe underruns whenever all planes are disabled.
6057 		 * So disable underrun reporting before all the planes get disabled.
6058 		 */
6059 		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
6060 		    (modeset || !new_primary_state->base.visible))
6061 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
6062 	}
6063 
6064 	/* Display WA 827 */
6065 	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
6066 	    needs_nv12_wa(dev_priv, pipe_config))
6067 		skl_wa_827(dev_priv, crtc->pipe, true);
6068 
6069 	/* Wa_2006604312:icl */
6070 	if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
6071 	    needs_scalerclk_wa(dev_priv, pipe_config))
6072 		icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
6073 
6074 	/*
6075 	 * Vblank time updates from the shadow to live plane control register
6076 	 * are blocked if the memory self-refresh mode is active at that
6077 	 * moment. So to make sure the plane gets truly disabled, disable
6078 	 * first the self-refresh mode. The self-refresh enable bit in turn
6079 	 * will be checked/applied by the HW only at the next frame start
6080 	 * event which is after the vblank start event, so we need to have a
6081 	 * wait-for-vblank between disabling the plane and the pipe.
6082 	 */
6083 	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
6084 	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
6085 		intel_wait_for_vblank(dev_priv, crtc->pipe);
6086 
6087 	/*
6088 	 * IVB workaround: must disable low power watermarks for at least
6089 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
6090 	 * when scaling is disabled.
6091 	 *
6092 	 * WaCxSRDisabledForSpriteScaling:ivb
6093 	 */
6094 	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
6095 	    old_crtc_state->base.active)
6096 		intel_wait_for_vblank(dev_priv, crtc->pipe);
6097 
6098 	/*
6099 	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
6100 	 * watermark programming here.
6101 	 */
6102 	if (needs_modeset(pipe_config))
6103 		return;
6104 
6105 	/*
6106 	 * For platforms that support atomic watermarks, program the
6107 	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
6108 	 * will be the intermediate values that are safe for both pre- and
6109 	 * post- vblank; when vblank happens, the 'active' values will be set
6110 	 * to the final 'target' values and we'll do this again to get the
6111 	 * optimal watermarks.  For gen9+ platforms, the values we program here
6112 	 * will be the final target values which will get automatically latched
6113 	 * at vblank time; no further programming will be necessary.
6114 	 *
6115 	 * If a platform hasn't been transitioned to atomic watermarks yet,
6116 	 * we'll continue to update watermarks the old way, if flags tell
6117 	 * us to.
6118 	 */
6119 	if (dev_priv->display.initial_watermarks != NULL)
6120 		dev_priv->display.initial_watermarks(intel_state,
6121 						     pipe_config);
6122 	else if (pipe_config->update_wm_pre)
6123 		intel_update_watermarks(crtc);
6124 }
6125 
6126 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6127 				      struct intel_crtc *crtc)
6128 {
6129 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6130 	const struct intel_crtc_state *new_crtc_state =
6131 		intel_atomic_get_new_crtc_state(state, crtc);
6132 	unsigned int update_mask = new_crtc_state->update_planes;
6133 	const struct intel_plane_state *old_plane_state;
6134 	struct intel_plane *plane;
6135 	unsigned fb_bits = 0;
6136 	int i;
6137 
6138 	intel_crtc_dpms_overlay_disable(crtc);
6139 
6140 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6141 		if (crtc->pipe != plane->pipe ||
6142 		    !(update_mask & BIT(plane->id)))
6143 			continue;
6144 
6145 		intel_disable_plane(plane, new_crtc_state);
6146 
6147 		if (old_plane_state->base.visible)
6148 			fb_bits |= plane->frontbuffer_bit;
6149 	}
6150 
6151 	intel_frontbuffer_flip(dev_priv, fb_bits);
6152 }
6153 
6154 /*
6155  * intel_connector_primary_encoder - get the primary encoder for a connector
6156  * @connector: connector for which to return the encoder
6157  *
6158  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6159  * all connectors to their encoder, except for DP-MST connectors which have
6160  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6161  * pointed to by as many DP-MST connectors as there are pipes.
6162  */
6163 static struct intel_encoder *
6164 intel_connector_primary_encoder(struct intel_connector *connector)
6165 {
6166 	struct intel_encoder *encoder;
6167 
6168 	if (connector->mst_port)
6169 		return &dp_to_dig_port(connector->mst_port)->base;
6170 
6171 	encoder = intel_attached_encoder(&connector->base);
6172 	WARN_ON(!encoder);
6173 
6174 	return encoder;
6175 }
6176 
6177 static bool
6178 intel_connector_needs_modeset(struct intel_atomic_state *state,
6179 			      const struct drm_connector_state *old_conn_state,
6180 			      const struct drm_connector_state *new_conn_state)
6181 {
6182 	struct intel_crtc *old_crtc = old_conn_state->crtc ?
6183 				      to_intel_crtc(old_conn_state->crtc) : NULL;
6184 	struct intel_crtc *new_crtc = new_conn_state->crtc ?
6185 				      to_intel_crtc(new_conn_state->crtc) : NULL;
6186 
6187 	return new_crtc != old_crtc ||
6188 	       (new_crtc &&
6189 		needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
6190 }
6191 
6192 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6193 {
6194 	struct drm_connector_state *old_conn_state;
6195 	struct drm_connector_state *new_conn_state;
6196 	struct drm_connector *conn;
6197 	int i;
6198 
6199 	for_each_oldnew_connector_in_state(&state->base, conn,
6200 					   old_conn_state, new_conn_state, i) {
6201 		struct intel_encoder *encoder;
6202 		struct intel_crtc *crtc;
6203 
6204 		if (!intel_connector_needs_modeset(state,
6205 						   old_conn_state,
6206 						   new_conn_state))
6207 			continue;
6208 
6209 		encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6210 		if (!encoder->update_prepare)
6211 			continue;
6212 
6213 		crtc = new_conn_state->crtc ?
6214 			to_intel_crtc(new_conn_state->crtc) : NULL;
6215 		encoder->update_prepare(state, encoder, crtc);
6216 	}
6217 }
6218 
6219 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6220 {
6221 	struct drm_connector_state *old_conn_state;
6222 	struct drm_connector_state *new_conn_state;
6223 	struct drm_connector *conn;
6224 	int i;
6225 
6226 	for_each_oldnew_connector_in_state(&state->base, conn,
6227 					   old_conn_state, new_conn_state, i) {
6228 		struct intel_encoder *encoder;
6229 		struct intel_crtc *crtc;
6230 
6231 		if (!intel_connector_needs_modeset(state,
6232 						   old_conn_state,
6233 						   new_conn_state))
6234 			continue;
6235 
6236 		encoder = intel_connector_primary_encoder(to_intel_connector(conn));
6237 		if (!encoder->update_complete)
6238 			continue;
6239 
6240 		crtc = new_conn_state->crtc ?
6241 			to_intel_crtc(new_conn_state->crtc) : NULL;
6242 		encoder->update_complete(state, encoder, crtc);
6243 	}
6244 }
6245 
6246 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
6247 					  struct intel_crtc_state *crtc_state,
6248 					  struct intel_atomic_state *state)
6249 {
6250 	struct drm_connector_state *conn_state;
6251 	struct drm_connector *conn;
6252 	int i;
6253 
6254 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6255 		struct intel_encoder *encoder =
6256 			to_intel_encoder(conn_state->best_encoder);
6257 
6258 		if (conn_state->crtc != &crtc->base)
6259 			continue;
6260 
6261 		if (encoder->pre_pll_enable)
6262 			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6263 	}
6264 }
6265 
6266 static void intel_encoders_pre_enable(struct intel_crtc *crtc,
6267 				      struct intel_crtc_state *crtc_state,
6268 				      struct intel_atomic_state *state)
6269 {
6270 	struct drm_connector_state *conn_state;
6271 	struct drm_connector *conn;
6272 	int i;
6273 
6274 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6275 		struct intel_encoder *encoder =
6276 			to_intel_encoder(conn_state->best_encoder);
6277 
6278 		if (conn_state->crtc != &crtc->base)
6279 			continue;
6280 
6281 		if (encoder->pre_enable)
6282 			encoder->pre_enable(encoder, crtc_state, conn_state);
6283 	}
6284 }
6285 
6286 static void intel_encoders_enable(struct intel_crtc *crtc,
6287 				  struct intel_crtc_state *crtc_state,
6288 				  struct intel_atomic_state *state)
6289 {
6290 	struct drm_connector_state *conn_state;
6291 	struct drm_connector *conn;
6292 	int i;
6293 
6294 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6295 		struct intel_encoder *encoder =
6296 			to_intel_encoder(conn_state->best_encoder);
6297 
6298 		if (conn_state->crtc != &crtc->base)
6299 			continue;
6300 
6301 		if (encoder->enable)
6302 			encoder->enable(encoder, crtc_state, conn_state);
6303 		intel_opregion_notify_encoder(encoder, true);
6304 	}
6305 }
6306 
6307 static void intel_encoders_disable(struct intel_crtc *crtc,
6308 				   struct intel_crtc_state *old_crtc_state,
6309 				   struct intel_atomic_state *state)
6310 {
6311 	struct drm_connector_state *old_conn_state;
6312 	struct drm_connector *conn;
6313 	int i;
6314 
6315 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6316 		struct intel_encoder *encoder =
6317 			to_intel_encoder(old_conn_state->best_encoder);
6318 
6319 		if (old_conn_state->crtc != &crtc->base)
6320 			continue;
6321 
6322 		intel_opregion_notify_encoder(encoder, false);
6323 		if (encoder->disable)
6324 			encoder->disable(encoder, old_crtc_state, old_conn_state);
6325 	}
6326 }
6327 
6328 static void intel_encoders_post_disable(struct intel_crtc *crtc,
6329 					struct intel_crtc_state *old_crtc_state,
6330 					struct intel_atomic_state *state)
6331 {
6332 	struct drm_connector_state *old_conn_state;
6333 	struct drm_connector *conn;
6334 	int i;
6335 
6336 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6337 		struct intel_encoder *encoder =
6338 			to_intel_encoder(old_conn_state->best_encoder);
6339 
6340 		if (old_conn_state->crtc != &crtc->base)
6341 			continue;
6342 
6343 		if (encoder->post_disable)
6344 			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6345 	}
6346 }
6347 
6348 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
6349 					    struct intel_crtc_state *old_crtc_state,
6350 					    struct intel_atomic_state *state)
6351 {
6352 	struct drm_connector_state *old_conn_state;
6353 	struct drm_connector *conn;
6354 	int i;
6355 
6356 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6357 		struct intel_encoder *encoder =
6358 			to_intel_encoder(old_conn_state->best_encoder);
6359 
6360 		if (old_conn_state->crtc != &crtc->base)
6361 			continue;
6362 
6363 		if (encoder->post_pll_disable)
6364 			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6365 	}
6366 }
6367 
6368 static void intel_encoders_update_pipe(struct intel_crtc *crtc,
6369 				       struct intel_crtc_state *crtc_state,
6370 				       struct intel_atomic_state *state)
6371 {
6372 	struct drm_connector_state *conn_state;
6373 	struct drm_connector *conn;
6374 	int i;
6375 
6376 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6377 		struct intel_encoder *encoder =
6378 			to_intel_encoder(conn_state->best_encoder);
6379 
6380 		if (conn_state->crtc != &crtc->base)
6381 			continue;
6382 
6383 		if (encoder->update_pipe)
6384 			encoder->update_pipe(encoder, crtc_state, conn_state);
6385 	}
6386 }
6387 
6388 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6389 {
6390 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6391 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6392 
6393 	plane->disable_plane(plane, crtc_state);
6394 }
6395 
6396 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6397 				 struct intel_atomic_state *state)
6398 {
6399 	struct drm_crtc *crtc = pipe_config->base.crtc;
6400 	struct drm_device *dev = crtc->dev;
6401 	struct drm_i915_private *dev_priv = to_i915(dev);
6402 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6403 	enum pipe pipe = intel_crtc->pipe;
6404 
6405 	if (WARN_ON(intel_crtc->active))
6406 		return;
6407 
6408 	/*
6409 	 * Sometimes spurious CPU pipe underruns happen during FDI
6410 	 * training, at least with VGA+HDMI cloning. Suppress them.
6411 	 *
	 * On ILK we get occasional spurious CPU pipe underruns
6413 	 * between eDP port A enable and vdd enable. Also PCH port
6414 	 * enable seems to result in the occasional CPU pipe underrun.
6415 	 *
6416 	 * Spurious PCH underruns also occur during PCH enabling.
6417 	 */
6418 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6419 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6420 
6421 	if (pipe_config->has_pch_encoder)
6422 		intel_prepare_shared_dpll(pipe_config);
6423 
6424 	if (intel_crtc_has_dp_encoder(pipe_config))
6425 		intel_dp_set_m_n(pipe_config, M1_N1);
6426 
6427 	intel_set_pipe_timings(pipe_config);
6428 	intel_set_pipe_src_size(pipe_config);
6429 
6430 	if (pipe_config->has_pch_encoder) {
6431 		intel_cpu_transcoder_set_m_n(pipe_config,
6432 					     &pipe_config->fdi_m_n, NULL);
6433 	}
6434 
6435 	ironlake_set_pipeconf(pipe_config);
6436 
6437 	intel_crtc->active = true;
6438 
6439 	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
6440 
6441 	if (pipe_config->has_pch_encoder) {
6442 		/* Note: FDI PLL enabling _must_ be done before we enable the
6443 		 * cpu pipes, hence this is separate from all the other fdi/pch
6444 		 * enabling. */
6445 		ironlake_fdi_pll_enable(pipe_config);
6446 	} else {
6447 		assert_fdi_tx_disabled(dev_priv, pipe);
6448 		assert_fdi_rx_disabled(dev_priv, pipe);
6449 	}
6450 
6451 	ironlake_pfit_enable(pipe_config);
6452 
6453 	/*
	 * On ILK+ the LUT must be loaded before the pipe is running, but with
6455 	 * clocks enabled
6456 	 */
6457 	intel_color_load_luts(pipe_config);
6458 	intel_color_commit(pipe_config);
6459 	/* update DSPCNTR to configure gamma for pipe bottom color */
6460 	intel_disable_primary_plane(pipe_config);
6461 
6462 	if (dev_priv->display.initial_watermarks != NULL)
6463 		dev_priv->display.initial_watermarks(state, pipe_config);
6464 	intel_enable_pipe(pipe_config);
6465 
6466 	if (pipe_config->has_pch_encoder)
6467 		ironlake_pch_enable(state, pipe_config);
6468 
6469 	assert_vblank_disabled(crtc);
6470 	intel_crtc_vblank_on(pipe_config);
6471 
6472 	intel_encoders_enable(intel_crtc, pipe_config, state);
6473 
6474 	if (HAS_PCH_CPT(dev_priv))
6475 		cpt_verify_modeset(dev, intel_crtc->pipe);
6476 
6477 	/*
6478 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6479 	 * And a second vblank wait is needed at least on ILK with
6480 	 * some interlaced HDMI modes. Let's do the double wait always
6481 	 * in case there are more corner cases we don't know about.
6482 	 */
6483 	if (pipe_config->has_pch_encoder) {
6484 		intel_wait_for_vblank(dev_priv, pipe);
6485 		intel_wait_for_vblank(dev_priv, pipe);
6486 	}
6487 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6488 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6489 }
6490 
6491 /* IPS only exists on ULT machines and is tied to pipe A. */
6492 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6493 {
6494 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6495 }
6496 
6497 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6498 					    enum pipe pipe, bool apply)
6499 {
6500 	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6501 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6502 
6503 	if (apply)
6504 		val |= mask;
6505 	else
6506 		val &= ~mask;
6507 
6508 	I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6509 }
6510 
6511 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6512 {
6513 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6514 	enum pipe pipe = crtc->pipe;
6515 	u32 val;
6516 
6517 	val = MBUS_DBOX_A_CREDIT(2);
6518 
6519 	if (INTEL_GEN(dev_priv) >= 12) {
6520 		val |= MBUS_DBOX_BW_CREDIT(2);
6521 		val |= MBUS_DBOX_B_CREDIT(12);
6522 	} else {
6523 		val |= MBUS_DBOX_BW_CREDIT(1);
6524 		val |= MBUS_DBOX_B_CREDIT(8);
6525 	}
6526 
6527 	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6528 }
6529 
6530 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6531 				struct intel_atomic_state *state)
6532 {
6533 	struct drm_crtc *crtc = pipe_config->base.crtc;
6534 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6535 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6536 	enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
6537 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6538 	bool psl_clkgate_wa;
6539 
6540 	if (WARN_ON(intel_crtc->active))
6541 		return;
6542 
6543 	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
6544 
6545 	if (pipe_config->shared_dpll)
6546 		intel_enable_shared_dpll(pipe_config);
6547 
6548 	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
6549 
6550 	if (intel_crtc_has_dp_encoder(pipe_config))
6551 		intel_dp_set_m_n(pipe_config, M1_N1);
6552 
6553 	if (!transcoder_is_dsi(cpu_transcoder))
6554 		intel_set_pipe_timings(pipe_config);
6555 
6556 	if (INTEL_GEN(dev_priv) >= 11)
6557 		icl_enable_trans_port_sync(pipe_config);
6558 
6559 	intel_set_pipe_src_size(pipe_config);
6560 
6561 	if (cpu_transcoder != TRANSCODER_EDP &&
6562 	    !transcoder_is_dsi(cpu_transcoder)) {
6563 		I915_WRITE(PIPE_MULT(cpu_transcoder),
6564 			   pipe_config->pixel_multiplier - 1);
6565 	}
6566 
6567 	if (pipe_config->has_pch_encoder) {
6568 		intel_cpu_transcoder_set_m_n(pipe_config,
6569 					     &pipe_config->fdi_m_n, NULL);
6570 	}
6571 
6572 	if (!transcoder_is_dsi(cpu_transcoder))
6573 		haswell_set_pipeconf(pipe_config);
6574 
6575 	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6576 		bdw_set_pipemisc(pipe_config);
6577 
6578 	intel_crtc->active = true;
6579 
6580 	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6581 	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6582 			 pipe_config->pch_pfit.enabled;
6583 	if (psl_clkgate_wa)
6584 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6585 
6586 	if (INTEL_GEN(dev_priv) >= 9)
6587 		skylake_pfit_enable(pipe_config);
6588 	else
6589 		ironlake_pfit_enable(pipe_config);
6590 
6591 	/*
	 * On ILK+ the LUT must be loaded before the pipe is running, but with
6593 	 * clocks enabled
6594 	 */
6595 	intel_color_load_luts(pipe_config);
6596 	intel_color_commit(pipe_config);
6597 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
6598 	if (INTEL_GEN(dev_priv) < 9)
6599 		intel_disable_primary_plane(pipe_config);
6600 
6601 	if (INTEL_GEN(dev_priv) >= 11)
6602 		icl_set_pipe_chicken(intel_crtc);
6603 
6604 	if (!transcoder_is_dsi(cpu_transcoder))
6605 		intel_ddi_enable_transcoder_func(pipe_config);
6606 
6607 	if (dev_priv->display.initial_watermarks != NULL)
6608 		dev_priv->display.initial_watermarks(state, pipe_config);
6609 
6610 	if (INTEL_GEN(dev_priv) >= 11)
6611 		icl_pipe_mbus_enable(intel_crtc);
6612 
6613 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
6614 	if (!transcoder_is_dsi(cpu_transcoder))
6615 		intel_enable_pipe(pipe_config);
6616 
6617 	if (pipe_config->has_pch_encoder)
6618 		lpt_pch_enable(state, pipe_config);
6619 
6620 	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6621 		intel_ddi_set_vc_payload_alloc(pipe_config, true);
6622 
6623 	assert_vblank_disabled(crtc);
6624 	intel_crtc_vblank_on(pipe_config);
6625 
6626 	intel_encoders_enable(intel_crtc, pipe_config, state);
6627 
6628 	if (psl_clkgate_wa) {
6629 		intel_wait_for_vblank(dev_priv, pipe);
6630 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6631 	}
6632 
6633 	/* If we change the relative order between pipe/planes enabling, we need
6634 	 * to change the workaround. */
6635 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6636 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6637 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6638 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6639 	}
6640 }
6641 
6642 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6643 {
6644 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6645 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6646 	enum pipe pipe = crtc->pipe;
6647 
	/* To avoid upsetting the power well on haswell, only disable the pfit
	 * if it's in use. The hw state code will make sure we get this right. */
6650 	if (old_crtc_state->pch_pfit.enabled) {
6651 		I915_WRITE(PF_CTL(pipe), 0);
6652 		I915_WRITE(PF_WIN_POS(pipe), 0);
6653 		I915_WRITE(PF_WIN_SZ(pipe), 0);
6654 	}
6655 }
6656 
6657 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6658 				  struct intel_atomic_state *state)
6659 {
6660 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
6661 	struct drm_device *dev = crtc->dev;
6662 	struct drm_i915_private *dev_priv = to_i915(dev);
6663 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6664 	enum pipe pipe = intel_crtc->pipe;
6665 
6666 	/*
6667 	 * Sometimes spurious CPU pipe underruns happen when the
6668 	 * pipe is already disabled, but FDI RX/TX is still enabled.
6669 	 * Happens at least with VGA+HDMI cloning. Suppress them.
6670 	 */
6671 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6672 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6673 
6674 	intel_encoders_disable(intel_crtc, old_crtc_state, state);
6675 
6676 	drm_crtc_vblank_off(crtc);
6677 	assert_vblank_disabled(crtc);
6678 
6679 	intel_disable_pipe(old_crtc_state);
6680 
6681 	ironlake_pfit_disable(old_crtc_state);
6682 
6683 	if (old_crtc_state->has_pch_encoder)
6684 		ironlake_fdi_disable(crtc);
6685 
6686 	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
6687 
6688 	if (old_crtc_state->has_pch_encoder) {
6689 		ironlake_disable_pch_transcoder(dev_priv, pipe);
6690 
6691 		if (HAS_PCH_CPT(dev_priv)) {
6692 			i915_reg_t reg;
6693 			u32 temp;
6694 
6695 			/* disable TRANS_DP_CTL */
6696 			reg = TRANS_DP_CTL(pipe);
6697 			temp = I915_READ(reg);
6698 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6699 				  TRANS_DP_PORT_SEL_MASK);
6700 			temp |= TRANS_DP_PORT_SEL_NONE;
6701 			I915_WRITE(reg, temp);
6702 
6703 			/* disable DPLL_SEL */
6704 			temp = I915_READ(PCH_DPLL_SEL);
6705 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6706 			I915_WRITE(PCH_DPLL_SEL, temp);
6707 		}
6708 
6709 		ironlake_fdi_pll_disable(intel_crtc);
6710 	}
6711 
6712 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6713 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6714 }
6715 
6716 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6717 				 struct intel_atomic_state *state)
6718 {
6719 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
6720 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6721 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6722 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6723 
6724 	intel_encoders_disable(intel_crtc, old_crtc_state, state);
6725 
6726 	drm_crtc_vblank_off(crtc);
6727 	assert_vblank_disabled(crtc);
6728 
6729 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
6730 	if (!transcoder_is_dsi(cpu_transcoder))
6731 		intel_disable_pipe(old_crtc_state);
6732 
6733 	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6734 		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6735 
6736 	if (INTEL_GEN(dev_priv) >= 11)
6737 		icl_disable_transcoder_port_sync(old_crtc_state);
6738 
6739 	if (!transcoder_is_dsi(cpu_transcoder))
6740 		intel_ddi_disable_transcoder_func(old_crtc_state);
6741 
6742 	intel_dsc_disable(old_crtc_state);
6743 
6744 	if (INTEL_GEN(dev_priv) >= 9)
6745 		skylake_scaler_disable(intel_crtc);
6746 	else
6747 		ironlake_pfit_disable(old_crtc_state);
6748 
6749 	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
6750 
6751 	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
6752 }
6753 
6754 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6755 {
6756 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6757 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6758 
6759 	if (!crtc_state->gmch_pfit.control)
6760 		return;
6761 
6762 	/*
6763 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
6764 	 * according to the register description and PRM.
6765 	 */
6766 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6767 	assert_pipe_disabled(dev_priv, crtc->pipe);
6768 
6769 	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6770 	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6771 
6772 	/* Border color in case we don't scale up to the full screen. Black by
6773 	 * default, change to something else for debugging. */
6774 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
6775 }
6776 
6777 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
6778 {
6779 	if (phy == PHY_NONE)
6780 		return false;
6781 
6782 	if (IS_ELKHARTLAKE(dev_priv))
6783 		return phy <= PHY_C;
6784 
6785 	if (INTEL_GEN(dev_priv) >= 11)
6786 		return phy <= PHY_B;
6787 
6788 	return false;
6789 }
6790 
6791 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
6792 {
6793 	if (INTEL_GEN(dev_priv) >= 12)
6794 		return phy >= PHY_D && phy <= PHY_I;
6795 
6796 	if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6797 		return phy >= PHY_C && phy <= PHY_F;
6798 
6799 	return false;
6800 }
6801 
6802 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
6803 {
6804 	if (IS_ELKHARTLAKE(i915) && port == PORT_D)
6805 		return PHY_A;
6806 
6807 	return (enum phy)port;
6808 }
6809 
6810 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6811 {
6812 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
6813 		return PORT_TC_NONE;
6814 
6815 	if (INTEL_GEN(dev_priv) >= 12)
6816 		return port - PORT_D;
6817 
6818 	return port - PORT_C;
6819 }
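
/*
 * For example, assuming the usual enum layout (PORT_TC1 == 0):
 * intel_port_to_tc() maps PORT_D to PORT_TC1 on gen12, whereas on gen11
 * the first TC port is PORT_C.
 */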
6820 
6821 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6822 {
6823 	switch (port) {
6824 	case PORT_A:
6825 		return POWER_DOMAIN_PORT_DDI_A_LANES;
6826 	case PORT_B:
6827 		return POWER_DOMAIN_PORT_DDI_B_LANES;
6828 	case PORT_C:
6829 		return POWER_DOMAIN_PORT_DDI_C_LANES;
6830 	case PORT_D:
6831 		return POWER_DOMAIN_PORT_DDI_D_LANES;
6832 	case PORT_E:
6833 		return POWER_DOMAIN_PORT_DDI_E_LANES;
6834 	case PORT_F:
6835 		return POWER_DOMAIN_PORT_DDI_F_LANES;
6836 	case PORT_G:
6837 		return POWER_DOMAIN_PORT_DDI_G_LANES;
6838 	default:
6839 		MISSING_CASE(port);
6840 		return POWER_DOMAIN_PORT_OTHER;
6841 	}
6842 }
6843 
6844 enum intel_display_power_domain
6845 intel_aux_power_domain(struct intel_digital_port *dig_port)
6846 {
6847 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
6848 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
6849 
6850 	if (intel_phy_is_tc(dev_priv, phy) &&
6851 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
6852 		switch (dig_port->aux_ch) {
6853 		case AUX_CH_C:
6854 			return POWER_DOMAIN_AUX_C_TBT;
6855 		case AUX_CH_D:
6856 			return POWER_DOMAIN_AUX_D_TBT;
6857 		case AUX_CH_E:
6858 			return POWER_DOMAIN_AUX_E_TBT;
6859 		case AUX_CH_F:
6860 			return POWER_DOMAIN_AUX_F_TBT;
6861 		case AUX_CH_G:
6862 			return POWER_DOMAIN_AUX_G_TBT;
6863 		default:
6864 			MISSING_CASE(dig_port->aux_ch);
6865 			return POWER_DOMAIN_AUX_C_TBT;
6866 		}
6867 	}
6868 
6869 	switch (dig_port->aux_ch) {
6870 	case AUX_CH_A:
6871 		return POWER_DOMAIN_AUX_A;
6872 	case AUX_CH_B:
6873 		return POWER_DOMAIN_AUX_B;
6874 	case AUX_CH_C:
6875 		return POWER_DOMAIN_AUX_C;
6876 	case AUX_CH_D:
6877 		return POWER_DOMAIN_AUX_D;
6878 	case AUX_CH_E:
6879 		return POWER_DOMAIN_AUX_E;
6880 	case AUX_CH_F:
6881 		return POWER_DOMAIN_AUX_F;
6882 	case AUX_CH_G:
6883 		return POWER_DOMAIN_AUX_G;
6884 	default:
6885 		MISSING_CASE(dig_port->aux_ch);
6886 		return POWER_DOMAIN_AUX_A;
6887 	}
6888 }
6889 
6890 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6891 {
6892 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6893 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6894 	struct drm_encoder *encoder;
6895 	enum pipe pipe = crtc->pipe;
6896 	u64 mask;
6897 	enum transcoder transcoder = crtc_state->cpu_transcoder;
6898 
6899 	if (!crtc_state->base.active)
6900 		return 0;
6901 
6902 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6903 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6904 	if (crtc_state->pch_pfit.enabled ||
6905 	    crtc_state->pch_pfit.force_thru)
6906 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6907 
6908 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
6909 				  crtc_state->base.encoder_mask) {
6910 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6911 
6912 		mask |= BIT_ULL(intel_encoder->power_domain);
6913 	}
6914 
6915 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6916 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6917 
6918 	if (crtc_state->shared_dpll)
6919 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6920 
6921 	return mask;
6922 }
6923 
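/*
 * Note on the get/put protocol below: modeset_get_crtc_power_domains()
 * grabs references for the power domains the CRTC newly needs and returns
 * the mask of domains it no longer needs; the caller drops those via
 * modeset_put_power_domains() once the hardware has been reprogrammed.
 */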
6924 static u64
6925 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
6926 {
6927 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6928 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6929 	enum intel_display_power_domain domain;
6930 	u64 domains, new_domains, old_domains;
6931 
6932 	old_domains = crtc->enabled_power_domains;
6933 	crtc->enabled_power_domains = new_domains =
6934 		get_crtc_power_domains(crtc_state);
6935 
6936 	domains = new_domains & ~old_domains;
6937 
6938 	for_each_power_domain(domain, domains)
6939 		intel_display_power_get(dev_priv, domain);
6940 
6941 	return old_domains & ~new_domains;
6942 }
6943 
6944 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6945 				      u64 domains)
6946 {
6947 	enum intel_display_power_domain domain;
6948 
6949 	for_each_power_domain(domain, domains)
6950 		intel_display_power_put_unchecked(dev_priv, domain);
6951 }
6952 
6953 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6954 				   struct intel_atomic_state *state)
6955 {
6956 	struct drm_crtc *crtc = pipe_config->base.crtc;
6957 	struct drm_device *dev = crtc->dev;
6958 	struct drm_i915_private *dev_priv = to_i915(dev);
6959 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6960 	enum pipe pipe = intel_crtc->pipe;
6961 
6962 	if (WARN_ON(intel_crtc->active))
6963 		return;
6964 
6965 	if (intel_crtc_has_dp_encoder(pipe_config))
6966 		intel_dp_set_m_n(pipe_config, M1_N1);
6967 
6968 	intel_set_pipe_timings(pipe_config);
6969 	intel_set_pipe_src_size(pipe_config);
6970 
6971 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6972 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6973 		I915_WRITE(CHV_CANVAS(pipe), 0);
6974 	}
6975 
6976 	i9xx_set_pipeconf(pipe_config);
6977 
6978 	intel_crtc->active = true;
6979 
6980 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6981 
6982 	intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
6983 
6984 	if (IS_CHERRYVIEW(dev_priv)) {
6985 		chv_prepare_pll(intel_crtc, pipe_config);
6986 		chv_enable_pll(intel_crtc, pipe_config);
6987 	} else {
6988 		vlv_prepare_pll(intel_crtc, pipe_config);
6989 		vlv_enable_pll(intel_crtc, pipe_config);
6990 	}
6991 
6992 	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
6993 
6994 	i9xx_pfit_enable(pipe_config);
6995 
6996 	intel_color_load_luts(pipe_config);
6997 	intel_color_commit(pipe_config);
6998 	/* update DSPCNTR to configure gamma for pipe bottom color */
6999 	intel_disable_primary_plane(pipe_config);
7000 
7001 	dev_priv->display.initial_watermarks(state, pipe_config);
7002 	intel_enable_pipe(pipe_config);
7003 
7004 	assert_vblank_disabled(crtc);
7005 	intel_crtc_vblank_on(pipe_config);
7006 
7007 	intel_encoders_enable(intel_crtc, pipe_config, state);
7008 }
7009 
7010 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7011 {
7012 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7013 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7014 
7015 	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
7016 	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
7017 }
7018 
7019 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
7020 			     struct intel_atomic_state *state)
7021 {
7022 	struct drm_crtc *crtc = pipe_config->base.crtc;
7023 	struct drm_device *dev = crtc->dev;
7024 	struct drm_i915_private *dev_priv = to_i915(dev);
7025 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7026 	enum pipe pipe = intel_crtc->pipe;
7027 
7028 	if (WARN_ON(intel_crtc->active))
7029 		return;
7030 
7031 	i9xx_set_pll_dividers(pipe_config);
7032 
7033 	if (intel_crtc_has_dp_encoder(pipe_config))
7034 		intel_dp_set_m_n(pipe_config, M1_N1);
7035 
7036 	intel_set_pipe_timings(pipe_config);
7037 	intel_set_pipe_src_size(pipe_config);
7038 
7039 	i9xx_set_pipeconf(pipe_config);
7040 
7041 	intel_crtc->active = true;
7042 
7043 	if (!IS_GEN(dev_priv, 2))
7044 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7045 
7046 	intel_encoders_pre_enable(intel_crtc, pipe_config, state);
7047 
7048 	i9xx_enable_pll(intel_crtc, pipe_config);
7049 
7050 	i9xx_pfit_enable(pipe_config);
7051 
7052 	intel_color_load_luts(pipe_config);
7053 	intel_color_commit(pipe_config);
7054 	/* update DSPCNTR to configure gamma for pipe bottom color */
7055 	intel_disable_primary_plane(pipe_config);
7056 
7057 	if (dev_priv->display.initial_watermarks != NULL)
7058 		dev_priv->display.initial_watermarks(state,
7059 						     pipe_config);
7060 	else
7061 		intel_update_watermarks(intel_crtc);
7062 	intel_enable_pipe(pipe_config);
7063 
7064 	assert_vblank_disabled(crtc);
7065 	intel_crtc_vblank_on(pipe_config);
7066 
7067 	intel_encoders_enable(intel_crtc, pipe_config, state);
7068 }
7069 
7070 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7071 {
7072 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
7073 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7074 
7075 	if (!old_crtc_state->gmch_pfit.control)
7076 		return;
7077 
7078 	assert_pipe_disabled(dev_priv, crtc->pipe);
7079 
7080 	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
7081 		      I915_READ(PFIT_CONTROL));
7082 	I915_WRITE(PFIT_CONTROL, 0);
7083 }
7084 
7085 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
7086 			      struct intel_atomic_state *state)
7087 {
7088 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
7089 	struct drm_device *dev = crtc->dev;
7090 	struct drm_i915_private *dev_priv = to_i915(dev);
7091 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7092 	enum pipe pipe = intel_crtc->pipe;
7093 
7094 	/*
7095 	 * On gen2 planes are double buffered but the pipe isn't, so we must
7096 	 * wait for planes to fully turn off before disabling the pipe.
7097 	 */
7098 	if (IS_GEN(dev_priv, 2))
7099 		intel_wait_for_vblank(dev_priv, pipe);
7100 
7101 	intel_encoders_disable(intel_crtc, old_crtc_state, state);
7102 
7103 	drm_crtc_vblank_off(crtc);
7104 	assert_vblank_disabled(crtc);
7105 
7106 	intel_disable_pipe(old_crtc_state);
7107 
7108 	i9xx_pfit_disable(old_crtc_state);
7109 
7110 	intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
7111 
7112 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
7113 		if (IS_CHERRYVIEW(dev_priv))
7114 			chv_disable_pll(dev_priv, pipe);
7115 		else if (IS_VALLEYVIEW(dev_priv))
7116 			vlv_disable_pll(dev_priv, pipe);
7117 		else
7118 			i9xx_disable_pll(old_crtc_state);
7119 	}
7120 
7121 	intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
7122 
7123 	if (!IS_GEN(dev_priv, 2))
7124 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
7125 
7126 	if (!dev_priv->display.initial_watermarks)
7127 		intel_update_watermarks(intel_crtc);
7128 
7129 	/* clock the pipe down to 640x480@60 to potentially save power */
7130 	if (IS_I830(dev_priv))
7131 		i830_enable_pipe(dev_priv, pipe);
7132 }
7133 
7134 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
7135 					struct drm_modeset_acquire_ctx *ctx)
7136 {
7137 	struct intel_encoder *encoder;
7138 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7139 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
7140 	struct intel_bw_state *bw_state =
7141 		to_intel_bw_state(dev_priv->bw_obj.state);
7142 	enum intel_display_power_domain domain;
7143 	struct intel_plane *plane;
7144 	u64 domains;
7145 	struct drm_atomic_state *state;
7146 	struct intel_crtc_state *crtc_state;
7147 	int ret;
7148 
7149 	if (!intel_crtc->active)
7150 		return;
7151 
7152 	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
7153 		const struct intel_plane_state *plane_state =
7154 			to_intel_plane_state(plane->base.state);
7155 
7156 		if (plane_state->base.visible)
7157 			intel_plane_disable_noatomic(intel_crtc, plane);
7158 	}
7159 
7160 	state = drm_atomic_state_alloc(crtc->dev);
7161 	if (!state) {
7162 		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory\n",
7163 			      crtc->base.id, crtc->name);
7164 		return;
7165 	}
7166 
7167 	state->acquire_ctx = ctx;
7168 
7169 	/* Everything's already locked, -EDEADLK can't happen. */
7170 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
7171 	ret = drm_atomic_add_affected_connectors(state, crtc);
7172 
7173 	WARN_ON(IS_ERR(crtc_state) || ret);
7174 
7175 	dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
7176 
7177 	drm_atomic_state_put(state);
7178 
7179 	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
7180 		      crtc->base.id, crtc->name);
7181 
7182 	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
7183 	crtc->state->active = false;
7184 	intel_crtc->active = false;
7185 	crtc->enabled = false;
7186 	crtc->state->connector_mask = 0;
7187 	crtc->state->encoder_mask = 0;
7188 
7189 	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
7190 		encoder->base.crtc = NULL;
7191 
7192 	intel_fbc_disable(intel_crtc);
7193 	intel_update_watermarks(intel_crtc);
7194 	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
7195 
7196 	domains = intel_crtc->enabled_power_domains;
7197 	for_each_power_domain(domain, domains)
7198 		intel_display_power_put_unchecked(dev_priv, domain);
7199 	intel_crtc->enabled_power_domains = 0;
7200 
7201 	dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
7202 	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
7203 	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
7204 
7205 	bw_state->data_rate[intel_crtc->pipe] = 0;
7206 	bw_state->num_active_planes[intel_crtc->pipe] = 0;
7207 }
7208 
7209 /*
7210  * Turn all CRTCs off, but do not adjust state.
7211  * This has to be paired with a call to intel_modeset_setup_hw_state.
7212  */
7213 int intel_display_suspend(struct drm_device *dev)
7214 {
7215 	struct drm_i915_private *dev_priv = to_i915(dev);
7216 	struct drm_atomic_state *state;
7217 	int ret;
7218 
7219 	state = drm_atomic_helper_suspend(dev);
7220 	ret = PTR_ERR_OR_ZERO(state);
7221 	if (ret)
7222 		DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
7223 	else
7224 		dev_priv->modeset_restore_state = state;
7225 	return ret;
7226 }
7227 
7228 void intel_encoder_destroy(struct drm_encoder *encoder)
7229 {
7230 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7231 
7232 	drm_encoder_cleanup(encoder);
7233 	kfree(intel_encoder);
7234 }
7235 
7236 /* Cross check the actual hw state with our own modeset state tracking (and its
7237  * internal consistency). */
7238 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
7239 					 struct drm_connector_state *conn_state)
7240 {
7241 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
7242 
7243 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
7244 		      connector->base.base.id,
7245 		      connector->base.name);
7246 
7247 	if (connector->get_hw_state(connector)) {
7248 		struct intel_encoder *encoder = connector->encoder;
7249 
7250 		I915_STATE_WARN(!crtc_state,
7251 			 "connector enabled without attached crtc\n");
7252 
7253 		if (!crtc_state)
7254 			return;
7255 
7256 		I915_STATE_WARN(!crtc_state->base.active,
7257 		      "connector is active, but attached crtc isn't\n");
7258 
7259 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7260 			return;
7261 
7262 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7263 			"atomic encoder doesn't match attached encoder\n");
7264 
7265 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7266 			"attached encoder crtc differs from connector crtc\n");
7267 	} else {
7268 		I915_STATE_WARN(crtc_state && crtc_state->base.active,
7269 			"attached crtc is active, but connector isn't\n");
7270 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7271 			"best encoder set without crtc!\n");
7272 	}
7273 }
7274 
7275 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7276 {
7277 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7278 		return crtc_state->fdi_lanes;
7279 
7280 	return 0;
7281 }
7282 
7283 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7284 				     struct intel_crtc_state *pipe_config)
7285 {
7286 	struct drm_i915_private *dev_priv = to_i915(dev);
7287 	struct drm_atomic_state *state = pipe_config->base.state;
7288 	struct intel_crtc *other_crtc;
7289 	struct intel_crtc_state *other_crtc_state;
7290 
7291 	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7292 		      pipe_name(pipe), pipe_config->fdi_lanes);
7293 	if (pipe_config->fdi_lanes > 4) {
7294 		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7295 			      pipe_name(pipe), pipe_config->fdi_lanes);
7296 		return -EINVAL;
7297 	}
7298 
7299 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7300 		if (pipe_config->fdi_lanes > 2) {
7301 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7302 				      pipe_config->fdi_lanes);
7303 			return -EINVAL;
7304 		}
7305 
7306 		return 0;
7307 	}
7308 
7309 	if (INTEL_NUM_PIPES(dev_priv) == 2)
7310 		return 0;
7311 
7312 	/* Ivybridge 3 pipe FDI lane sharing is really complicated */
7313 	switch (pipe) {
7314 	case PIPE_A:
7315 		return 0;
7316 	case PIPE_B:
7317 		if (pipe_config->fdi_lanes <= 2)
7318 			return 0;
7319 
7320 		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7321 		other_crtc_state =
7322 			intel_atomic_get_crtc_state(state, other_crtc);
7323 		if (IS_ERR(other_crtc_state))
7324 			return PTR_ERR(other_crtc_state);
7325 
7326 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7327 			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7328 				      pipe_name(pipe), pipe_config->fdi_lanes);
7329 			return -EINVAL;
7330 		}
7331 		return 0;
7332 	case PIPE_C:
7333 		if (pipe_config->fdi_lanes > 2) {
7334 			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7335 				      pipe_name(pipe), pipe_config->fdi_lanes);
7336 			return -EINVAL;
7337 		}
7338 
7339 		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7340 		other_crtc_state =
7341 			intel_atomic_get_crtc_state(state, other_crtc);
7342 		if (IS_ERR(other_crtc_state))
7343 			return PTR_ERR(other_crtc_state);
7344 
7345 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7346 			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
7347 			return -EINVAL;
7348 		}
7349 		return 0;
7350 	default:
7351 		BUG();
7352 	}
7353 }
7354 
7355 #define RETRY 1
7356 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7357 				       struct intel_crtc_state *pipe_config)
7358 {
7359 	struct drm_device *dev = intel_crtc->base.dev;
7360 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7361 	int lane, link_bw, fdi_dotclock, ret;
7362 	bool needs_recompute = false;
7363 
7364 retry:
7365 	/* FDI is a binary signal running at ~2.7 GHz, encoding
7366 	 * each output octet as 10 bits. The actual frequency
7367 	 * is stored as a divider into a 100 MHz clock, and the
7368 	 * mode pixel clock is stored in units of 1 kHz.
7369 	 * Hence the bandwidth of each lane in terms of the mode
7370 	 * signal is:
7371 	 */
7372 	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7373 
7374 	fdi_dotclock = adjusted_mode->crtc_clock;
7375 
7376 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7377 					   pipe_config->pipe_bpp);
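	/*
	 * Worked example with illustrative numbers: one FDI lane at
	 * ~2.7 GHz carries 270M octets/s of payload after 8b/10b
	 * decoding, i.e. link_bw = 270000 in 1 kHz mode-clock units.
	 * A 148500 kHz mode at 24 bpp needs 148500 * 3 kB/s of data,
	 * so 445500 / 270000 rounds up to 2 lanes here.
	 */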
7378 
7379 	pipe_config->fdi_lanes = lane;
7380 
7381 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7382 			       link_bw, &pipe_config->fdi_m_n, false, false);
7383 
7384 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7385 	if (ret == -EDEADLK)
7386 		return ret;
7387 
7388 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
7389 		pipe_config->pipe_bpp -= 2*3;
7390 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7391 			      pipe_config->pipe_bpp);
7392 		needs_recompute = true;
7393 		pipe_config->bw_constrained = true;
7394 
7395 		goto retry;
7396 	}
7397 
7398 	if (needs_recompute)
7399 		return RETRY;
7400 
7401 	return ret;
7402 }
7403 
7404 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7405 {
7406 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7407 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7408 
7409 	/* IPS only exists on ULT machines and is tied to pipe A. */
7410 	if (!hsw_crtc_supports_ips(crtc))
7411 		return false;
7412 
7413 	if (!i915_modparams.enable_ips)
7414 		return false;
7415 
7416 	if (crtc_state->pipe_bpp > 24)
7417 		return false;
7418 
7419 	/*
7420 	 * We compare against max which means we must take
7421 	 * the increased cdclk requirement into account when
7422 	 * calculating the new cdclk.
7423 	 *
7424 	 * Should measure whether using a lower cdclk w/o IPS would be preferable.
7425 	 */
7426 	if (IS_BROADWELL(dev_priv) &&
7427 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7428 		return false;
7429 
7430 	return true;
7431 }
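
/*
 * E.g. (illustrative): with max_cdclk_freq = 540000 kHz the BDW check
 * above allows IPS only up to a pixel rate of 540000 * 95 / 100 =
 * 513000 kHz.
 */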
7432 
7433 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7434 {
7435 	struct drm_i915_private *dev_priv =
7436 		to_i915(crtc_state->base.crtc->dev);
7437 	struct intel_atomic_state *intel_state =
7438 		to_intel_atomic_state(crtc_state->base.state);
7439 
7440 	if (!hsw_crtc_state_ips_capable(crtc_state))
7441 		return false;
7442 
7443 	/*
7444 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7445 	 * enabled and disabled dynamically based on package C states,
7446 	 * user space can't make reliable use of the CRCs, so let's just
7447 	 * completely disable IPS.
7448 	 */
7449 	if (crtc_state->crc_enabled)
7450 		return false;
7451 
7452 	/* IPS should be fine as long as at least one plane is enabled. */
7453 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7454 		return false;
7455 
7456 	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7457 	if (IS_BROADWELL(dev_priv) &&
7458 	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7459 		return false;
7460 
7461 	return true;
7462 }
7463 
7464 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7465 {
7466 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7467 
7468 	/* GDG double wide on either pipe, otherwise pipe A only */
7469 	return INTEL_GEN(dev_priv) < 4 &&
7470 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7471 }
7472 
7473 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7474 {
7475 	u32 pixel_rate;
7476 
7477 	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7478 
7479 	/*
7480 	 * We only use IF-ID interlacing. If we ever use
7481 	 * PF-ID we'll need to adjust the pixel_rate here.
7482 	 */
7483 
7484 	if (pipe_config->pch_pfit.enabled) {
7485 		u64 pipe_w, pipe_h, pfit_w, pfit_h;
7486 		u32 pfit_size = pipe_config->pch_pfit.size;
7487 
7488 		pipe_w = pipe_config->pipe_src_w;
7489 		pipe_h = pipe_config->pipe_src_h;
7490 
7491 		pfit_w = (pfit_size >> 16) & 0xFFFF;
7492 		pfit_h = pfit_size & 0xFFFF;
7493 		if (pipe_w < pfit_w)
7494 			pipe_w = pfit_w;
7495 		if (pipe_h < pfit_h)
7496 			pipe_h = pfit_h;
7497 
7498 		if (WARN_ON(!pfit_w || !pfit_h))
7499 			return pixel_rate;
7500 
7501 		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7502 				     pfit_w * pfit_h);
7503 	}
7504 
7505 	return pixel_rate;
7506 }
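
/*
 * Worked example for the pfit case above (illustrative numbers): a
 * 1920x1200 pipe source squeezed into a 1920x1080 panel fitter window
 * makes pipe_w x pipe_h the larger 1920x1200, so the effective pixel
 * rate is crtc_clock * (1920 * 1200) / (1920 * 1080), about 11% above
 * the nominal mode clock.
 */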
7507 
7508 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7509 {
7510 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7511 
7512 	if (HAS_GMCH(dev_priv))
7513 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
7514 		crtc_state->pixel_rate =
7515 			crtc_state->base.adjusted_mode.crtc_clock;
7516 	else
7517 		crtc_state->pixel_rate =
7518 			ilk_pipe_pixel_rate(crtc_state);
7519 }
7520 
7521 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7522 				     struct intel_crtc_state *pipe_config)
7523 {
7524 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7525 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7526 	int clock_limit = dev_priv->max_dotclk_freq;
7527 
7528 	if (INTEL_GEN(dev_priv) < 4) {
7529 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7530 
7531 		/*
7532 		 * Enable double wide mode when the dot clock
7533 		 * is > 90% of the (display) core speed.
7534 		 */
7535 		if (intel_crtc_supports_double_wide(crtc) &&
7536 		    adjusted_mode->crtc_clock > clock_limit) {
7537 			clock_limit = dev_priv->max_dotclk_freq;
7538 			pipe_config->double_wide = true;
7539 		}
7540 	}
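	/*
	 * Illustrative example: with max_cdclk_freq = 320000 kHz the
	 * single-wide limit above is 288000 kHz, so a 320000 kHz dot
	 * clock is only achievable in double wide mode.
	 */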
7541 
7542 	if (adjusted_mode->crtc_clock > clock_limit) {
7543 		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7544 			      adjusted_mode->crtc_clock, clock_limit,
7545 			      yesno(pipe_config->double_wide));
7546 		return -EINVAL;
7547 	}
7548 
7549 	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7550 	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7551 	     pipe_config->base.ctm) {
7552 		/*
7553 		 * There is only one CSC unit per pipe, and we need it
7554 		 * for output conversion from RGB->YCBCR. So if CTM is already
7555 		 * applied we can't support YCBCR420 output.
7556 		 */
7557 		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7558 		return -EINVAL;
7559 	}
7560 
7561 	/*
7562 	 * Pipe horizontal size must be even in:
7563 	 * - DVO ganged mode
7564 	 * - LVDS dual channel mode
7565 	 * - Double wide pipe
7566 	 */
7567 	if (pipe_config->pipe_src_w & 1) {
7568 		if (pipe_config->double_wide) {
7569 			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7570 			return -EINVAL;
7571 		}
7572 
7573 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7574 		    intel_is_dual_link_lvds(dev_priv)) {
7575 			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7576 			return -EINVAL;
7577 		}
7578 	}
7579 
7580 	/* Cantiga+ cannot handle modes with an hsync front porch of 0.
7581 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7582 	 */
7583 	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7584 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7585 		return -EINVAL;
7586 
7587 	intel_crtc_compute_pixel_rate(pipe_config);
7588 
7589 	if (pipe_config->has_pch_encoder)
7590 		return ironlake_fdi_compute_config(crtc, pipe_config);
7591 
7592 	return 0;
7593 }
7594 
7595 static void
7596 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7597 {
7598 	while (*num > DATA_LINK_M_N_MASK ||
7599 	       *den > DATA_LINK_M_N_MASK) {
7600 		*num >>= 1;
7601 		*den >>= 1;
7602 	}
7603 }
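
/*
 * For instance (illustrative values): num = 0x3000000 and den = 0x1000000
 * exceed DATA_LINK_M_N_MASK (0xffffff), so the loop above shifts both
 * right twice, yielding 0xc00000 and 0x400000 and preserving the exact
 * 3:1 ratio.
 */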
7604 
7605 static void compute_m_n(unsigned int m, unsigned int n,
7606 			u32 *ret_m, u32 *ret_n,
7607 			bool constant_n)
7608 {
7609 	/*
7610 	 * Several DP dongles in particular seem to be fussy about
7611 	 * too large link M/N values. Use a fixed N value of 0x8000, which
7612 	 * such devices should accept. 0x8000 is the
7613 	 * specified fixed N value for asynchronous clock mode,
7614 	 * which the devices also expect in synchronous clock mode.
7615 	 */
7616 	if (constant_n)
7617 		*ret_n = 0x8000;
7618 	else
7619 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7620 
7621 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7622 	intel_reduce_m_n_ratio(ret_m, ret_n);
7623 }
7624 
7625 void
7626 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7627 		       int pixel_clock, int link_clock,
7628 		       struct intel_link_m_n *m_n,
7629 		       bool constant_n, bool fec_enable)
7630 {
7631 	u32 data_clock = bits_per_pixel * pixel_clock;
7632 
7633 	if (fec_enable)
7634 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
7635 
7636 	m_n->tu = 64;
7637 	compute_m_n(data_clock,
7638 		    link_clock * nlanes * 8,
7639 		    &m_n->gmch_m, &m_n->gmch_n,
7640 		    constant_n);
7641 
7642 	compute_m_n(pixel_clock, link_clock,
7643 		    &m_n->link_m, &m_n->link_n,
7644 		    constant_n);
7645 }
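
/*
 * Worked example (illustrative numbers): a 148500 kHz mode at 24 bpp over
 * 4 lanes of HBR (link_clock = 270000) gives
 * compute_m_n(24 * 148500, 270000 * 4 * 8, ...); with constant_n the data
 * M/N comes out as gmch_n = 0x8000 and
 * gmch_m = 3564000 * 32768 / 8640000 = 13516, i.e. ~41% link utilization.
 */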
7646 
7647 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
7648 {
7649 	/*
7650 	 * There may be no VBT; if the BIOS enabled SSC we can
7651 	 * just keep using it to avoid unnecessary flicker. Whereas if the
7652 	 * BIOS isn't using it, don't assume it will work even if the VBT
7653 	 * indicates as much.
7654 	 */
7655 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7656 		bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
7657 			DREF_SSC1_ENABLE;
7658 
7659 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
7660 			DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
7661 				      enableddisabled(bios_lvds_use_ssc),
7662 				      enableddisabled(dev_priv->vbt.lvds_use_ssc));
7663 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
7664 		}
7665 	}
7666 }
7667 
7668 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7669 {
7670 	if (i915_modparams.panel_use_ssc >= 0)
7671 		return i915_modparams.panel_use_ssc != 0;
7672 	return dev_priv->vbt.lvds_use_ssc &&
7673 		!(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7674 }
7675 
7676 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7677 {
7678 	return (1 << dpll->n) << 16 | dpll->m2;
7679 }
7680 
7681 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7682 {
7683 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7684 }
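
/*
 * Packing example (illustrative divider values): n = 4, m1 = 16, m2 = 8
 * yields 4 << 16 | 16 << 8 | 8 = 0x00041008 in the i9xx layout, whereas
 * the Pineview layout stores (1 << 4) << 16 | 8 = 0x00100008.
 */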
7685 
7686 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7687 				     struct intel_crtc_state *crtc_state,
7688 				     struct dpll *reduced_clock)
7689 {
7690 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7691 	u32 fp, fp2 = 0;
7692 
7693 	if (IS_PINEVIEW(dev_priv)) {
7694 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7695 		if (reduced_clock)
7696 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7697 	} else {
7698 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7699 		if (reduced_clock)
7700 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7701 	}
7702 
7703 	crtc_state->dpll_hw_state.fp0 = fp;
7704 
7705 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7706 	    reduced_clock) {
7707 		crtc_state->dpll_hw_state.fp1 = fp2;
7708 	} else {
7709 		crtc_state->dpll_hw_state.fp1 = fp;
7710 	}
7711 }
7712 
7713 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
7714 				 enum pipe pipe)
7715 {
7716 	u32 reg_val;
7717 
7718 	/*
7719 	 * PLLB opamp always calibrates to the max value of 0x3f, so force-enable it
7720 	 * and set it to a reasonable value instead.
7721 	 */
7722 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7723 	reg_val &= 0xffffff00;
7724 	reg_val |= 0x00000030;
7725 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7726 
7727 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7728 	reg_val &= 0x00ffffff;
7729 	reg_val |= 0x8c000000;
7730 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7731 
7732 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7733 	reg_val &= 0xffffff00;
7734 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7735 
7736 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7737 	reg_val &= 0x00ffffff;
7738 	reg_val |= 0xb0000000;
7739 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7740 }
7741 
7742 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7743 					 const struct intel_link_m_n *m_n)
7744 {
7745 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7746 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7747 	enum pipe pipe = crtc->pipe;
7748 
7749 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7750 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7751 	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7752 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7753 }
7754 
7755 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7756 				 enum transcoder transcoder)
7757 {
7758 	if (IS_HASWELL(dev_priv))
7759 		return transcoder == TRANSCODER_EDP;
7760 
7761 	/*
7762 	 * Strictly speaking some registers are available before
7763 	 * gen7, but we only support DRRS on gen7+.
7764 	 */
7765 	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7766 }
7767 
7768 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7769 					 const struct intel_link_m_n *m_n,
7770 					 const struct intel_link_m_n *m2_n2)
7771 {
7772 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7773 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7774 	enum pipe pipe = crtc->pipe;
7775 	enum transcoder transcoder = crtc_state->cpu_transcoder;
7776 
7777 	if (INTEL_GEN(dev_priv) >= 5) {
7778 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7779 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7780 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7781 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7782 		/*
7783 		 * M2_N2 registers are set only if DRRS is supported
7784 		 * (to make sure the registers are not unnecessarily accessed).
7785 		 */
7786 		if (m2_n2 && crtc_state->has_drrs &&
7787 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
7788 			I915_WRITE(PIPE_DATA_M2(transcoder),
7789 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7790 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7791 			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7792 			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7793 		}
7794 	} else {
7795 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7796 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7797 		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7798 		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7799 	}
7800 }
7801 
7802 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7803 {
7804 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7805 
7806 	if (m_n == M1_N1) {
7807 		dp_m_n = &crtc_state->dp_m_n;
7808 		dp_m2_n2 = &crtc_state->dp_m2_n2;
7809 	} else if (m_n == M2_N2) {
7810 
7811 		/*
7812 		 * M2_N2 registers are not supported. Hence the m2_n2 divider value
7813 		 * needs to be programmed into M1_N1.
7814 		 */
7815 		dp_m_n = &crtc_state->dp_m2_n2;
7816 	} else {
7817 		DRM_ERROR("Unsupported divider value\n");
7818 		return;
7819 	}
7820 
7821 	if (crtc_state->has_pch_encoder)
7822 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7823 	else
7824 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7825 }
7826 
7827 static void vlv_compute_dpll(struct intel_crtc *crtc,
7828 			     struct intel_crtc_state *pipe_config)
7829 {
7830 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7831 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7832 	if (crtc->pipe != PIPE_A)
7833 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7834 
7835 	/* DPLL not used with DSI, but still need the rest set up */
7836 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7837 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7838 			DPLL_EXT_BUFFER_ENABLE_VLV;
7839 
7840 	pipe_config->dpll_hw_state.dpll_md =
7841 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7842 }
7843 
7844 static void chv_compute_dpll(struct intel_crtc *crtc,
7845 			     struct intel_crtc_state *pipe_config)
7846 {
7847 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7848 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7849 	if (crtc->pipe != PIPE_A)
7850 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7851 
7852 	/* DPLL not used with DSI, but still need the rest set up */
7853 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7854 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7855 
7856 	pipe_config->dpll_hw_state.dpll_md =
7857 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7858 }
7859 
7860 static void vlv_prepare_pll(struct intel_crtc *crtc,
7861 			    const struct intel_crtc_state *pipe_config)
7862 {
7863 	struct drm_device *dev = crtc->base.dev;
7864 	struct drm_i915_private *dev_priv = to_i915(dev);
7865 	enum pipe pipe = crtc->pipe;
7866 	u32 mdiv;
7867 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7868 	u32 coreclk, reg_val;
7869 
7870 	/* Enable Refclk */
7871 	I915_WRITE(DPLL(pipe),
7872 		   pipe_config->dpll_hw_state.dpll &
7873 		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7874 
7875 	/* No need to actually set up the DPLL with DSI */
7876 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7877 		return;
7878 
7879 	vlv_dpio_get(dev_priv);
7880 
7881 	bestn = pipe_config->dpll.n;
7882 	bestm1 = pipe_config->dpll.m1;
7883 	bestm2 = pipe_config->dpll.m2;
7884 	bestp1 = pipe_config->dpll.p1;
7885 	bestp2 = pipe_config->dpll.p2;
7886 
7887 	/* See the eDP/HDMI DPIO driver VBIOS notes doc */
7888 
7889 	/* PLL B needs special handling */
7890 	if (pipe == PIPE_B)
7891 		vlv_pllb_recal_opamp(dev_priv, pipe);
7892 
7893 	/* Set up Tx target for periodic Rcomp update */
7894 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7895 
7896 	/* Disable target IRef on PLL */
7897 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7898 	reg_val &= 0x00ffffff;
7899 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7900 
7901 	/* Disable fast lock */
7902 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7903 
7904 	/* Set idtafcrecal before PLL is enabled */
7905 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7906 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7907 	mdiv |= ((bestn << DPIO_N_SHIFT));
7908 	mdiv |= (1 << DPIO_K_SHIFT);
7909 
7910 	/*
7911 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7912 	 * but we don't support that).
7913 	 * Note: don't use the DAC post divider as it seems unstable.
7914 	 */
7915 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7916 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7917 
7918 	mdiv |= DPIO_ENABLE_CALIBRATION;
7919 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7920 
7921 	/* Set HBR and RBR LPF coefficients */
7922 	if (pipe_config->port_clock == 162000 ||
7923 	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7924 	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7925 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7926 				 0x009f0003);
7927 	else
7928 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7929 				 0x00d0000f);
7930 
7931 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7932 		/* Use SSC source */
7933 		if (pipe == PIPE_A)
7934 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7935 					 0x0df40000);
7936 		else
7937 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7938 					 0x0df70000);
7939 	} else { /* HDMI or VGA */
7940 		/* Use bend source */
7941 		if (pipe == PIPE_A)
7942 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7943 					 0x0df70000);
7944 		else
7945 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7946 					 0x0df40000);
7947 	}
7948 
7949 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7950 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7951 	if (intel_crtc_has_dp_encoder(pipe_config))
7952 		coreclk |= 0x01000000;
7953 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7954 
7955 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7956 
7957 	vlv_dpio_put(dev_priv);
7958 }
7959 
7960 static void chv_prepare_pll(struct intel_crtc *crtc,
7961 			    const struct intel_crtc_state *pipe_config)
7962 {
7963 	struct drm_device *dev = crtc->base.dev;
7964 	struct drm_i915_private *dev_priv = to_i915(dev);
7965 	enum pipe pipe = crtc->pipe;
7966 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7967 	u32 loopfilter, tribuf_calcntr;
7968 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7969 	u32 dpio_val;
7970 	int vco;
7971 
7972 	/* Enable Refclk and SSC */
7973 	I915_WRITE(DPLL(pipe),
7974 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7975 
7976 	/* No need to actually set up the DPLL with DSI */
7977 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7978 		return;
7979 
7980 	bestn = pipe_config->dpll.n;
7981 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7982 	bestm1 = pipe_config->dpll.m1;
7983 	bestm2 = pipe_config->dpll.m2 >> 22;
7984 	bestp1 = pipe_config->dpll.p1;
7985 	bestp2 = pipe_config->dpll.p2;
7986 	vco = pipe_config->dpll.vco;
7987 	dpio_val = 0;
7988 	loopfilter = 0;
7989 
7990 	vlv_dpio_get(dev_priv);
7991 
7992 	/* p1 and p2 divider */
7993 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7994 			5 << DPIO_CHV_S1_DIV_SHIFT |
7995 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7996 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7997 			1 << DPIO_CHV_K_DIV_SHIFT);
7998 
7999 	/* Feedback post-divider - m2 */
8000 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
8001 
8002 	/* Feedback refclk divider - n and m1 */
8003 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
8004 			DPIO_CHV_M1_DIV_BY_2 |
8005 			1 << DPIO_CHV_N_DIV_SHIFT);
8006 
8007 	/* M2 fraction division */
8008 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
8009 
8010 	/* M2 fraction division enable */
8011 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8012 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
8013 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
8014 	if (bestm2_frac)
8015 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
8016 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
8017 
8018 	/* Program digital lock detect threshold */
8019 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
8020 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
8021 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
8022 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
8023 	if (!bestm2_frac)
8024 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
8025 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
8026 
8027 	/* Loop filter */
8028 	if (vco == 5400000) {
8029 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
8030 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
8031 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
8032 		tribuf_calcntr = 0x9;
8033 	} else if (vco <= 6200000) {
8034 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
8035 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
8036 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8037 		tribuf_calcntr = 0x9;
8038 	} else if (vco <= 6480000) {
8039 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8040 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8041 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8042 		tribuf_calcntr = 0x8;
8043 	} else {
8044 		/* Not supported. Apply the same limits as in the max case */
8045 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
8046 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
8047 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
8048 		tribuf_calcntr = 0;
8049 	}
8050 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
8051 
8052 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
8053 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
8054 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
8055 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
8056 
8057 	/* AFC Recal */
8058 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
8059 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
8060 			DPIO_AFC_RECAL);
8061 
8062 	vlv_dpio_put(dev_priv);
8063 }
8064 
8065 /**
8066  * vlv_force_pll_on - forcibly enable just the PLL
8067  * @dev_priv: i915 private structure
8068  * @pipe: pipe PLL to enable
8069  * @dpll: PLL configuration
8070  *
8071  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8072  * in cases where we need the PLL enabled even when @pipe is not going to
8073  * be enabled.
8074  */
8075 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8076 		     const struct dpll *dpll)
8077 {
8078 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8079 	struct intel_crtc_state *pipe_config;
8080 
8081 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8082 	if (!pipe_config)
8083 		return -ENOMEM;
8084 
8085 	pipe_config->base.crtc = &crtc->base;
8086 	pipe_config->pixel_multiplier = 1;
8087 	pipe_config->dpll = *dpll;
8088 
8089 	if (IS_CHERRYVIEW(dev_priv)) {
8090 		chv_compute_dpll(crtc, pipe_config);
8091 		chv_prepare_pll(crtc, pipe_config);
8092 		chv_enable_pll(crtc, pipe_config);
8093 	} else {
8094 		vlv_compute_dpll(crtc, pipe_config);
8095 		vlv_prepare_pll(crtc, pipe_config);
8096 		vlv_enable_pll(crtc, pipe_config);
8097 	}
8098 
8099 	kfree(pipe_config);
8100 
8101 	return 0;
8102 }
8103 
8104 /**
8105  * vlv_force_pll_off - forcibly disable just the PLL
8106  * @dev_priv: i915 private structure
8107  * @pipe: pipe PLL to disable
8108  *
8109  * Disable the PLL for @pipe. To be used to undo a prior
8110  * vlv_force_pll_on() in cases where @pipe itself was never enabled.
8111  */
8112 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8113 {
8114 	if (IS_CHERRYVIEW(dev_priv))
8115 		chv_disable_pll(dev_priv, pipe);
8116 	else
8117 		vlv_disable_pll(dev_priv, pipe);
8118 }
8119 
8120 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8121 			      struct intel_crtc_state *crtc_state,
8122 			      struct dpll *reduced_clock)
8123 {
8124 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8125 	u32 dpll;
8126 	struct dpll *clock = &crtc_state->dpll;
8127 
8128 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8129 
8130 	dpll = DPLL_VGA_MODE_DIS;
8131 
8132 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8133 		dpll |= DPLLB_MODE_LVDS;
8134 	else
8135 		dpll |= DPLLB_MODE_DAC_SERIAL;
8136 
8137 	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8138 	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8139 		dpll |= (crtc_state->pixel_multiplier - 1)
8140 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
8141 	}
8142 
8143 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8144 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8145 		dpll |= DPLL_SDVO_HIGH_SPEED;
8146 
8147 	if (intel_crtc_has_dp_encoder(crtc_state))
8148 		dpll |= DPLL_SDVO_HIGH_SPEED;
8149 
8150 	/* compute bitmask from p1 value */
8151 	if (IS_PINEVIEW(dev_priv)) {
8152 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8153 	} else {
8154 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8155 		if (IS_G4X(dev_priv) && reduced_clock)
8156 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8157 	}
8158 	switch (clock->p2) {
8159 	case 5:
8160 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8161 		break;
8162 	case 7:
8163 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8164 		break;
8165 	case 10:
8166 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8167 		break;
8168 	case 14:
8169 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8170 		break;
8171 	}
8172 	if (INTEL_GEN(dev_priv) >= 4)
8173 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
8174 
8175 	if (crtc_state->sdvo_tv_clock)
8176 		dpll |= PLL_REF_INPUT_TVCLKINBC;
8177 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8178 		 intel_panel_use_ssc(dev_priv))
8179 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8180 	else
8181 		dpll |= PLL_REF_INPUT_DREFCLK;
8182 
8183 	dpll |= DPLL_VCO_ENABLE;
8184 	crtc_state->dpll_hw_state.dpll = dpll;
8185 
8186 	if (INTEL_GEN(dev_priv) >= 4) {
8187 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8188 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
8189 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
8190 	}
8191 }
8192 
8193 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8194 			      struct intel_crtc_state *crtc_state,
8195 			      struct dpll *reduced_clock)
8196 {
8197 	struct drm_device *dev = crtc->base.dev;
8198 	struct drm_i915_private *dev_priv = to_i915(dev);
8199 	u32 dpll;
8200 	struct dpll *clock = &crtc_state->dpll;
8201 
8202 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8203 
8204 	dpll = DPLL_VGA_MODE_DIS;
8205 
8206 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8207 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8208 	} else {
8209 		if (clock->p1 == 2)
8210 			dpll |= PLL_P1_DIVIDE_BY_TWO;
8211 		else
8212 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8213 		if (clock->p2 == 4)
8214 			dpll |= PLL_P2_DIVIDE_BY_4;
8215 	}
8216 
8217 	/*
8218 	 * Bspec:
8219 	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
8220 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8221 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8222 	 *  Enable) must be set to “1” in both the DPLL A Control Register
8223 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8224 	 *
8225 	 * For simplicity we keep both bits always enabled in
8226 	 * both DPLLs. The spec says we should disable the DVO 2X clock
8227 	 * when not needed, but this seems to work fine in practice.
8228 	 */
8229 	if (IS_I830(dev_priv) ||
8230 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8231 		dpll |= DPLL_DVO_2X_MODE;
8232 
8233 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8234 	    intel_panel_use_ssc(dev_priv))
8235 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8236 	else
8237 		dpll |= PLL_REF_INPUT_DREFCLK;
8238 
8239 	dpll |= DPLL_VCO_ENABLE;
8240 	crtc_state->dpll_hw_state.dpll = dpll;
8241 }
8242 
8243 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
8244 {
8245 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8246 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8247 	enum pipe pipe = crtc->pipe;
8248 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8249 	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
8250 	u32 crtc_vtotal, crtc_vblank_end;
8251 	int vsyncshift = 0;
8252 
8253 	/* We need to be careful not to change the adjusted mode, for otherwise
8254 	 * the hw state checker will get angry at the mismatch. */
8255 	crtc_vtotal = adjusted_mode->crtc_vtotal;
8256 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
8257 
8258 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
8259 		/* the chip adds 2 halflines automatically */
8260 		crtc_vtotal -= 1;
8261 		crtc_vblank_end -= 1;
8262 
8263 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8264 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
8265 		else
8266 			vsyncshift = adjusted_mode->crtc_hsync_start -
8267 				adjusted_mode->crtc_htotal / 2;
8268 		if (vsyncshift < 0)
8269 			vsyncshift += adjusted_mode->crtc_htotal;
8270 	}
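	/*
	 * E.g. (illustrative): an interlaced SDVO mode with a 2200 clock
	 * htotal gets vsyncshift = (2200 - 1) / 2 = 1099 pixels.
	 */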
8271 
8272 	if (INTEL_GEN(dev_priv) > 3)
8273 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
8274 
8275 	I915_WRITE(HTOTAL(cpu_transcoder),
8276 		   (adjusted_mode->crtc_hdisplay - 1) |
8277 		   ((adjusted_mode->crtc_htotal - 1) << 16));
8278 	I915_WRITE(HBLANK(cpu_transcoder),
8279 		   (adjusted_mode->crtc_hblank_start - 1) |
8280 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
8281 	I915_WRITE(HSYNC(cpu_transcoder),
8282 		   (adjusted_mode->crtc_hsync_start - 1) |
8283 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
8284 
8285 	I915_WRITE(VTOTAL(cpu_transcoder),
8286 		   (adjusted_mode->crtc_vdisplay - 1) |
8287 		   ((crtc_vtotal - 1) << 16));
8288 	I915_WRITE(VBLANK(cpu_transcoder),
8289 		   (adjusted_mode->crtc_vblank_start - 1) |
8290 		   ((crtc_vblank_end - 1) << 16));
8291 	I915_WRITE(VSYNC(cpu_transcoder),
8292 		   (adjusted_mode->crtc_vsync_start - 1) |
8293 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
8294 
8295 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8296 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8297 	 * documented in the DDI_FUNC_CTL register description, EDP Input Select
8298 	 * bits. */
8299 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8300 	    (pipe == PIPE_B || pipe == PIPE_C))
8301 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
8303 }
8304 
8305 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8306 {
8307 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8308 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8309 	enum pipe pipe = crtc->pipe;
8310 
8311 	/* pipesrc controls the size that is scaled from, which should
8312 	 * always be the user's requested size.
8313 	 */
8314 	I915_WRITE(PIPESRC(pipe),
8315 		   ((crtc_state->pipe_src_w - 1) << 16) |
8316 		   (crtc_state->pipe_src_h - 1));
8317 }
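
/*
 * E.g. a 1920x1080 source is written to PIPESRC as
 * ((1920 - 1) << 16) | (1080 - 1) = 0x077f0437.
 */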
8318 
8319 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8320 {
8321 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
8322 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8323 
8324 	if (IS_GEN(dev_priv, 2))
8325 		return false;
8326 
8327 	if (INTEL_GEN(dev_priv) >= 9 ||
8328 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8329 		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8330 	else
8331 		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8332 }
8333 
8334 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8335 				   struct intel_crtc_state *pipe_config)
8336 {
8337 	struct drm_device *dev = crtc->base.dev;
8338 	struct drm_i915_private *dev_priv = to_i915(dev);
8339 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8340 	u32 tmp;
8341 
8342 	tmp = I915_READ(HTOTAL(cpu_transcoder));
8343 	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8344 	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8345 
8346 	if (!transcoder_is_dsi(cpu_transcoder)) {
8347 		tmp = I915_READ(HBLANK(cpu_transcoder));
8348 		pipe_config->base.adjusted_mode.crtc_hblank_start =
8349 							(tmp & 0xffff) + 1;
8350 		pipe_config->base.adjusted_mode.crtc_hblank_end =
8351 						((tmp >> 16) & 0xffff) + 1;
8352 	}
8353 	tmp = I915_READ(HSYNC(cpu_transcoder));
8354 	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8355 	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8356 
8357 	tmp = I915_READ(VTOTAL(cpu_transcoder));
8358 	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8359 	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8360 
8361 	if (!transcoder_is_dsi(cpu_transcoder)) {
8362 		tmp = I915_READ(VBLANK(cpu_transcoder));
8363 		pipe_config->base.adjusted_mode.crtc_vblank_start =
8364 							(tmp & 0xffff) + 1;
8365 		pipe_config->base.adjusted_mode.crtc_vblank_end =
8366 						((tmp >> 16) & 0xffff) + 1;
8367 	}
8368 	tmp = I915_READ(VSYNC(cpu_transcoder));
8369 	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8370 	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8371 
8372 	if (intel_pipe_is_interlaced(pipe_config)) {
8373 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8374 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8375 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8376 	}
8377 }
8378 
8379 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8380 				    struct intel_crtc_state *pipe_config)
8381 {
8382 	struct drm_device *dev = crtc->base.dev;
8383 	struct drm_i915_private *dev_priv = to_i915(dev);
8384 	u32 tmp;
8385 
8386 	tmp = I915_READ(PIPESRC(crtc->pipe));
8387 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8388 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8389 
8390 	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8391 	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8392 }
8393 
8394 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8395 				 struct intel_crtc_state *pipe_config)
8396 {
8397 	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8398 	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8399 	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8400 	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8401 
8402 	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8403 	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8404 	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8405 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8406 
8407 	mode->flags = pipe_config->base.adjusted_mode.flags;
8408 	mode->type = DRM_MODE_TYPE_DRIVER;
8409 
8410 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8411 
8412 	mode->hsync = drm_mode_hsync(mode);
8413 	mode->vrefresh = drm_mode_vrefresh(mode);
8414 	drm_mode_set_name(mode);
8415 }
8416 
8417 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8418 {
8419 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8420 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8421 	u32 pipeconf;
8422 
8423 	pipeconf = 0;
8424 
8425 	/* we keep both pipes enabled on 830 */
8426 	if (IS_I830(dev_priv))
8427 		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8428 
8429 	if (crtc_state->double_wide)
8430 		pipeconf |= PIPECONF_DOUBLE_WIDE;
8431 
8432 	/* only g4x and later have fancy bpc/dither controls */
8433 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8434 	    IS_CHERRYVIEW(dev_priv)) {
8435 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
8436 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8437 			pipeconf |= PIPECONF_DITHER_EN |
8438 				    PIPECONF_DITHER_TYPE_SP;
8439 
8440 		switch (crtc_state->pipe_bpp) {
8441 		case 18:
8442 			pipeconf |= PIPECONF_6BPC;
8443 			break;
8444 		case 24:
8445 			pipeconf |= PIPECONF_8BPC;
8446 			break;
8447 		case 30:
8448 			pipeconf |= PIPECONF_10BPC;
8449 			break;
8450 		default:
8451 			/* Case prevented by intel_choose_pipe_bpp_dither. */
8452 			BUG();
8453 		}
8454 	}
8455 
8456 	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8457 		if (INTEL_GEN(dev_priv) < 4 ||
8458 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8459 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8460 		else
8461 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8462 	} else {
8463 		pipeconf |= PIPECONF_PROGRESSIVE;
8464 	}
8465 
8466 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8467 	     crtc_state->limited_color_range)
8468 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8469 
8470 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8471 
8472 	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8473 	POSTING_READ(PIPECONF(crtc->pipe));
8474 }
8475 
8476 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8477 				   struct intel_crtc_state *crtc_state)
8478 {
8479 	struct drm_device *dev = crtc->base.dev;
8480 	struct drm_i915_private *dev_priv = to_i915(dev);
8481 	const struct intel_limit *limit;
8482 	int refclk = 48000;
8483 
8484 	memset(&crtc_state->dpll_hw_state, 0,
8485 	       sizeof(crtc_state->dpll_hw_state));
8486 
8487 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8488 		if (intel_panel_use_ssc(dev_priv)) {
8489 			refclk = dev_priv->vbt.lvds_ssc_freq;
8490 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8491 		}
8492 
8493 		limit = &intel_limits_i8xx_lvds;
8494 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8495 		limit = &intel_limits_i8xx_dvo;
8496 	} else {
8497 		limit = &intel_limits_i8xx_dac;
8498 	}
8499 
8500 	if (!crtc_state->clock_set &&
8501 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8502 				 refclk, NULL, &crtc_state->dpll)) {
8503 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8504 		return -EINVAL;
8505 	}
8506 
8507 	i8xx_compute_dpll(crtc, crtc_state, NULL);
8508 
8509 	return 0;
8510 }
8511 
8512 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8513 				  struct intel_crtc_state *crtc_state)
8514 {
8515 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8516 	const struct intel_limit *limit;
8517 	int refclk = 96000;
8518 
8519 	memset(&crtc_state->dpll_hw_state, 0,
8520 	       sizeof(crtc_state->dpll_hw_state));
8521 
8522 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8523 		if (intel_panel_use_ssc(dev_priv)) {
8524 			refclk = dev_priv->vbt.lvds_ssc_freq;
8525 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8526 		}
8527 
8528 		if (intel_is_dual_link_lvds(dev_priv))
8529 			limit = &intel_limits_g4x_dual_channel_lvds;
8530 		else
8531 			limit = &intel_limits_g4x_single_channel_lvds;
8532 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8533 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8534 		limit = &intel_limits_g4x_hdmi;
8535 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8536 		limit = &intel_limits_g4x_sdvo;
8537 	} else {
		/* All other output types use the i9xx SDVO limits */
8539 		limit = &intel_limits_i9xx_sdvo;
8540 	}
8541 
8542 	if (!crtc_state->clock_set &&
8543 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8544 				refclk, NULL, &crtc_state->dpll)) {
8545 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8546 		return -EINVAL;
8547 	}
8548 
8549 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8550 
8551 	return 0;
8552 }
8553 
8554 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8555 				  struct intel_crtc_state *crtc_state)
8556 {
8557 	struct drm_device *dev = crtc->base.dev;
8558 	struct drm_i915_private *dev_priv = to_i915(dev);
8559 	const struct intel_limit *limit;
8560 	int refclk = 96000;
8561 
8562 	memset(&crtc_state->dpll_hw_state, 0,
8563 	       sizeof(crtc_state->dpll_hw_state));
8564 
8565 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8566 		if (intel_panel_use_ssc(dev_priv)) {
8567 			refclk = dev_priv->vbt.lvds_ssc_freq;
8568 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8569 		}
8570 
8571 		limit = &intel_limits_pineview_lvds;
8572 	} else {
8573 		limit = &intel_limits_pineview_sdvo;
8574 	}
8575 
8576 	if (!crtc_state->clock_set &&
8577 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8578 				refclk, NULL, &crtc_state->dpll)) {
8579 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8580 		return -EINVAL;
8581 	}
8582 
8583 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8584 
8585 	return 0;
8586 }
8587 
8588 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8589 				   struct intel_crtc_state *crtc_state)
8590 {
8591 	struct drm_device *dev = crtc->base.dev;
8592 	struct drm_i915_private *dev_priv = to_i915(dev);
8593 	const struct intel_limit *limit;
8594 	int refclk = 96000;
8595 
8596 	memset(&crtc_state->dpll_hw_state, 0,
8597 	       sizeof(crtc_state->dpll_hw_state));
8598 
8599 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8600 		if (intel_panel_use_ssc(dev_priv)) {
8601 			refclk = dev_priv->vbt.lvds_ssc_freq;
8602 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8603 		}
8604 
8605 		limit = &intel_limits_i9xx_lvds;
8606 	} else {
8607 		limit = &intel_limits_i9xx_sdvo;
8608 	}
8609 
8610 	if (!crtc_state->clock_set &&
8611 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8612 				 refclk, NULL, &crtc_state->dpll)) {
8613 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8614 		return -EINVAL;
8615 	}
8616 
8617 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8618 
8619 	return 0;
8620 }
8621 
8622 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8623 				  struct intel_crtc_state *crtc_state)
8624 {
8625 	int refclk = 100000;
8626 	const struct intel_limit *limit = &intel_limits_chv;
8627 
8628 	memset(&crtc_state->dpll_hw_state, 0,
8629 	       sizeof(crtc_state->dpll_hw_state));
8630 
8631 	if (!crtc_state->clock_set &&
8632 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8633 				refclk, NULL, &crtc_state->dpll)) {
8634 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8635 		return -EINVAL;
8636 	}
8637 
8638 	chv_compute_dpll(crtc, crtc_state);
8639 
8640 	return 0;
8641 }
8642 
8643 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8644 				  struct intel_crtc_state *crtc_state)
8645 {
8646 	int refclk = 100000;
8647 	const struct intel_limit *limit = &intel_limits_vlv;
8648 
8649 	memset(&crtc_state->dpll_hw_state, 0,
8650 	       sizeof(crtc_state->dpll_hw_state));
8651 
8652 	if (!crtc_state->clock_set &&
8653 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8654 				refclk, NULL, &crtc_state->dpll)) {
8655 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8656 		return -EINVAL;
8657 	}
8658 
8659 	vlv_compute_dpll(crtc, crtc_state);
8660 
8661 	return 0;
8662 }
8663 
8664 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8665 {
8666 	if (IS_I830(dev_priv))
8667 		return false;
8668 
8669 	return INTEL_GEN(dev_priv) >= 4 ||
8670 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8671 }
8672 
8673 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8674 				 struct intel_crtc_state *pipe_config)
8675 {
8676 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8677 	u32 tmp;
8678 
8679 	if (!i9xx_has_pfit(dev_priv))
8680 		return;
8681 
8682 	tmp = I915_READ(PFIT_CONTROL);
8683 	if (!(tmp & PFIT_ENABLE))
8684 		return;
8685 
8686 	/* Check whether the pfit is attached to our pipe. */
8687 	if (INTEL_GEN(dev_priv) < 4) {
8688 		if (crtc->pipe != PIPE_B)
8689 			return;
8690 	} else {
8691 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8692 			return;
8693 	}
8694 
8695 	pipe_config->gmch_pfit.control = tmp;
8696 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8697 }
8698 
8699 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8700 			       struct intel_crtc_state *pipe_config)
8701 {
8702 	struct drm_device *dev = crtc->base.dev;
8703 	struct drm_i915_private *dev_priv = to_i915(dev);
8704 	enum pipe pipe = crtc->pipe;
8705 	struct dpll clock;
8706 	u32 mdiv;
8707 	int refclk = 100000;
8708 
8709 	/* In case of DSI, DPLL will not be used */
8710 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8711 		return;
8712 
8713 	vlv_dpio_get(dev_priv);
8714 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8715 	vlv_dpio_put(dev_priv);
8716 
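	/* Unpack the M1/M2/N/P1/P2 divider fields from PLL DW3 */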
8717 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8718 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8719 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8720 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8721 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8722 
8723 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8724 }
8725 
8726 static void
8727 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8728 			      struct intel_initial_plane_config *plane_config)
8729 {
8730 	struct drm_device *dev = crtc->base.dev;
8731 	struct drm_i915_private *dev_priv = to_i915(dev);
8732 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8733 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8734 	enum pipe pipe;
8735 	u32 val, base, offset;
8736 	int fourcc, pixel_format;
8737 	unsigned int aligned_height;
8738 	struct drm_framebuffer *fb;
8739 	struct intel_framebuffer *intel_fb;
8740 
8741 	if (!plane->get_hw_state(plane, &pipe))
8742 		return;
8743 
8744 	WARN_ON(pipe != crtc->pipe);
8745 
8746 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8747 	if (!intel_fb) {
8748 		DRM_DEBUG_KMS("failed to alloc fb\n");
8749 		return;
8750 	}
8751 
8752 	fb = &intel_fb->base;
8753 
8754 	fb->dev = dev;
8755 
8756 	val = I915_READ(DSPCNTR(i9xx_plane));
8757 
8758 	if (INTEL_GEN(dev_priv) >= 4) {
8759 		if (val & DISPPLANE_TILED) {
8760 			plane_config->tiling = I915_TILING_X;
8761 			fb->modifier = I915_FORMAT_MOD_X_TILED;
8762 		}
8763 
8764 		if (val & DISPPLANE_ROTATE_180)
8765 			plane_config->rotation = DRM_MODE_ROTATE_180;
8766 	}
8767 
8768 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8769 	    val & DISPPLANE_MIRROR)
8770 		plane_config->rotation |= DRM_MODE_REFLECT_X;
8771 
8772 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8773 	fourcc = i9xx_format_to_fourcc(pixel_format);
8774 	fb->format = drm_format_info(fourcc);
8775 
8776 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8777 		offset = I915_READ(DSPOFFSET(i9xx_plane));
8778 		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8779 	} else if (INTEL_GEN(dev_priv) >= 4) {
8780 		if (plane_config->tiling)
8781 			offset = I915_READ(DSPTILEOFF(i9xx_plane));
8782 		else
8783 			offset = I915_READ(DSPLINOFF(i9xx_plane));
8784 		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8785 	} else {
8786 		base = I915_READ(DSPADDR(i9xx_plane));
8787 	}
8788 	plane_config->base = base;
8789 
8790 	val = I915_READ(PIPESRC(pipe));
8791 	fb->width = ((val >> 16) & 0xfff) + 1;
8792 	fb->height = ((val >> 0) & 0xfff) + 1;
8793 
8794 	val = I915_READ(DSPSTRIDE(i9xx_plane));
8795 	fb->pitches[0] = val & 0xffffffc0;
8796 
8797 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
8798 
8799 	plane_config->size = fb->pitches[0] * aligned_height;
8800 
8801 	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8802 		      crtc->base.name, plane->base.name, fb->width, fb->height,
8803 		      fb->format->cpp[0] * 8, base, fb->pitches[0],
8804 		      plane_config->size);
8805 
8806 	plane_config->fb = intel_fb;
8807 }
8808 
8809 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8810 			       struct intel_crtc_state *pipe_config)
8811 {
8812 	struct drm_device *dev = crtc->base.dev;
8813 	struct drm_i915_private *dev_priv = to_i915(dev);
8814 	enum pipe pipe = crtc->pipe;
8815 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8816 	struct dpll clock;
8817 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8818 	int refclk = 100000;
8819 
8820 	/* In case of DSI, DPLL will not be used */
8821 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8822 		return;
8823 
8824 	vlv_dpio_get(dev_priv);
8825 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8826 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8827 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8828 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8829 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8830 	vlv_dpio_put(dev_priv);
8831 
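	/*
	 * m2 is fixed point: the integer part lives above bit 22 and, when
	 * the fractional divider is enabled, the low 22 bits hold the fraction.
	 */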
8832 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8833 	clock.m2 = (pll_dw0 & 0xff) << 22;
8834 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8835 		clock.m2 |= pll_dw2 & 0x3fffff;
8836 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8837 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8838 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8839 
8840 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8841 }
8842 
8843 static enum intel_output_format
8844 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
8845 {
8846 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8847 	u32 tmp;
8848 
8849 	tmp = I915_READ(PIPEMISC(crtc->pipe));
8850 
8851 	if (tmp & PIPEMISC_YUV420_ENABLE) {
8852 		/* We support 4:2:0 in full blend mode only */
8853 		WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
8854 
8855 		return INTEL_OUTPUT_FORMAT_YCBCR420;
8856 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8857 		return INTEL_OUTPUT_FORMAT_YCBCR444;
8858 	} else {
8859 		return INTEL_OUTPUT_FORMAT_RGB;
8860 	}
8861 }
8862 
8863 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8864 {
8865 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8866 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8867 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8868 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8869 	u32 tmp;
8870 
8871 	tmp = I915_READ(DSPCNTR(i9xx_plane));
8872 
8873 	if (tmp & DISPPLANE_GAMMA_ENABLE)
8874 		crtc_state->gamma_enable = true;
8875 
8876 	if (!HAS_GMCH(dev_priv) &&
8877 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
8878 		crtc_state->csc_enable = true;
8879 }
8880 
8881 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8882 				 struct intel_crtc_state *pipe_config)
8883 {
8884 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8885 	enum intel_display_power_domain power_domain;
8886 	intel_wakeref_t wakeref;
8887 	u32 tmp;
8888 	bool ret;
8889 
8890 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8891 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8892 	if (!wakeref)
8893 		return false;
8894 
8895 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8896 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8897 	pipe_config->shared_dpll = NULL;
8898 	pipe_config->master_transcoder = INVALID_TRANSCODER;
8899 
8900 	ret = false;
8901 
8902 	tmp = I915_READ(PIPECONF(crtc->pipe));
8903 	if (!(tmp & PIPECONF_ENABLE))
8904 		goto out;
8905 
8906 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8907 	    IS_CHERRYVIEW(dev_priv)) {
8908 		switch (tmp & PIPECONF_BPC_MASK) {
8909 		case PIPECONF_6BPC:
8910 			pipe_config->pipe_bpp = 18;
8911 			break;
8912 		case PIPECONF_8BPC:
8913 			pipe_config->pipe_bpp = 24;
8914 			break;
8915 		case PIPECONF_10BPC:
8916 			pipe_config->pipe_bpp = 30;
8917 			break;
8918 		default:
8919 			break;
8920 		}
8921 	}
8922 
8923 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8924 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8925 		pipe_config->limited_color_range = true;
8926 
8927 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8928 		PIPECONF_GAMMA_MODE_SHIFT;
8929 
8930 	if (IS_CHERRYVIEW(dev_priv))
8931 		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8932 
8933 	i9xx_get_pipe_color_config(pipe_config);
8934 	intel_color_get_config(pipe_config);
8935 
8936 	if (INTEL_GEN(dev_priv) < 4)
8937 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8938 
8939 	intel_get_pipe_timings(crtc, pipe_config);
8940 	intel_get_pipe_src_size(crtc, pipe_config);
8941 
8942 	i9xx_get_pfit_config(crtc, pipe_config);
8943 
8944 	if (INTEL_GEN(dev_priv) >= 4) {
8945 		/* No way to read it out on pipes B and C */
8946 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8947 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
8948 		else
8949 			tmp = I915_READ(DPLL_MD(crtc->pipe));
8950 		pipe_config->pixel_multiplier =
8951 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8952 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8953 		pipe_config->dpll_hw_state.dpll_md = tmp;
8954 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8955 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8956 		tmp = I915_READ(DPLL(crtc->pipe));
8957 		pipe_config->pixel_multiplier =
8958 			((tmp & SDVO_MULTIPLIER_MASK)
8959 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8960 	} else {
8961 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8962 		 * port and will be fixed up in the encoder->get_config
8963 		 * function. */
8964 		pipe_config->pixel_multiplier = 1;
8965 	}
8966 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8967 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8968 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8969 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8970 	} else {
8971 		/* Mask out read-only status bits. */
8972 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8973 						     DPLL_PORTC_READY_MASK |
8974 						     DPLL_PORTB_READY_MASK);
8975 	}
8976 
8977 	if (IS_CHERRYVIEW(dev_priv))
8978 		chv_crtc_clock_get(crtc, pipe_config);
8979 	else if (IS_VALLEYVIEW(dev_priv))
8980 		vlv_crtc_clock_get(crtc, pipe_config);
8981 	else
8982 		i9xx_crtc_clock_get(crtc, pipe_config);
8983 
8984 	/*
8985 	 * Normally the dotclock is filled in by the encoder .get_config()
8986 	 * but in case the pipe is enabled w/o any ports we need a sane
8987 	 * default.
8988 	 */
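	/* e.g. a 200000 kHz port clock with a 2x multiplier gives a 100000 kHz dotclock */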
8989 	pipe_config->base.adjusted_mode.crtc_clock =
8990 		pipe_config->port_clock / pipe_config->pixel_multiplier;
8991 
8992 	ret = true;
8993 
8994 out:
8995 	intel_display_power_put(dev_priv, power_domain, wakeref);
8996 
8997 	return ret;
8998 }
8999 
9000 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
9001 {
9002 	struct intel_encoder *encoder;
9003 	int i;
9004 	u32 val, final;
9005 	bool has_lvds = false;
9006 	bool has_cpu_edp = false;
9007 	bool has_panel = false;
9008 	bool has_ck505 = false;
9009 	bool can_ssc = false;
9010 	bool using_ssc_source = false;
9011 
9012 	/* We need to take the global config into account */
9013 	for_each_intel_encoder(&dev_priv->drm, encoder) {
9014 		switch (encoder->type) {
9015 		case INTEL_OUTPUT_LVDS:
9016 			has_panel = true;
9017 			has_lvds = true;
9018 			break;
9019 		case INTEL_OUTPUT_EDP:
9020 			has_panel = true;
9021 			if (encoder->port == PORT_A)
9022 				has_cpu_edp = true;
9023 			break;
9024 		default:
9025 			break;
9026 		}
9027 	}
9028 
9029 	if (HAS_PCH_IBX(dev_priv)) {
9030 		has_ck505 = dev_priv->vbt.display_clock_mode;
9031 		can_ssc = has_ck505;
9032 	} else {
9033 		has_ck505 = false;
9034 		can_ssc = true;
9035 	}
9036 
9037 	/* Check if any DPLLs are using the SSC source */
9038 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9039 		u32 temp = I915_READ(PCH_DPLL(i));
9040 
9041 		if (!(temp & DPLL_VCO_ENABLE))
9042 			continue;
9043 
9044 		if ((temp & PLL_REF_INPUT_MASK) ==
9045 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
9046 			using_ssc_source = true;
9047 			break;
9048 		}
9049 	}
9050 
9051 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
9052 		      has_panel, has_lvds, has_ck505, using_ssc_source);
9053 
	/* Ironlake: try to set up the display reference clock before DPLL
	 * enabling. This is only under the driver's control after the
	 * PCH B stepping; earlier steppings ignore this setting.
	 */
9059 	val = I915_READ(PCH_DREF_CONTROL);
9060 
9061 	/* As we must carefully and slowly disable/enable each source in turn,
9062 	 * compute the final state we want first and check if we need to
9063 	 * make any changes at all.
9064 	 */
9065 	final = val;
9066 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
9067 	if (has_ck505)
9068 		final |= DREF_NONSPREAD_CK505_ENABLE;
9069 	else
9070 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
9071 
9072 	final &= ~DREF_SSC_SOURCE_MASK;
9073 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9074 	final &= ~DREF_SSC1_ENABLE;
9075 
9076 	if (has_panel) {
9077 		final |= DREF_SSC_SOURCE_ENABLE;
9078 
9079 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
9080 			final |= DREF_SSC1_ENABLE;
9081 
9082 		if (has_cpu_edp) {
9083 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
9084 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9085 			else
9086 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9087 		} else
9088 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9089 	} else if (using_ssc_source) {
9090 		final |= DREF_SSC_SOURCE_ENABLE;
9091 		final |= DREF_SSC1_ENABLE;
9092 	}
9093 
9094 	if (final == val)
9095 		return;
9096 
9097 	/* Always enable nonspread source */
9098 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
9099 
9100 	if (has_ck505)
9101 		val |= DREF_NONSPREAD_CK505_ENABLE;
9102 	else
9103 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
9104 
9105 	if (has_panel) {
9106 		val &= ~DREF_SSC_SOURCE_MASK;
9107 		val |= DREF_SSC_SOURCE_ENABLE;
9108 
		/* SSC must be turned on before enabling the CPU output */
9110 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9111 			DRM_DEBUG_KMS("Using SSC on panel\n");
9112 			val |= DREF_SSC1_ENABLE;
9113 		} else
9114 			val &= ~DREF_SSC1_ENABLE;
9115 
9116 		/* Get SSC going before enabling the outputs */
9117 		I915_WRITE(PCH_DREF_CONTROL, val);
9118 		POSTING_READ(PCH_DREF_CONTROL);
9119 		udelay(200);
9120 
9121 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9122 
9123 		/* Enable CPU source on CPU attached eDP */
9124 		if (has_cpu_edp) {
9125 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
9126 				DRM_DEBUG_KMS("Using SSC on eDP\n");
9127 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
9128 			} else
9129 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
9130 		} else
9131 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9132 
9133 		I915_WRITE(PCH_DREF_CONTROL, val);
9134 		POSTING_READ(PCH_DREF_CONTROL);
9135 		udelay(200);
9136 	} else {
9137 		DRM_DEBUG_KMS("Disabling CPU source output\n");
9138 
9139 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
9140 
9141 		/* Turn off CPU output */
9142 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
9143 
9144 		I915_WRITE(PCH_DREF_CONTROL, val);
9145 		POSTING_READ(PCH_DREF_CONTROL);
9146 		udelay(200);
9147 
9148 		if (!using_ssc_source) {
9149 			DRM_DEBUG_KMS("Disabling SSC source\n");
9150 
9151 			/* Turn off the SSC source */
9152 			val &= ~DREF_SSC_SOURCE_MASK;
9153 			val |= DREF_SSC_SOURCE_DISABLE;
9154 
9155 			/* Turn off SSC1 */
9156 			val &= ~DREF_SSC1_ENABLE;
9157 
9158 			I915_WRITE(PCH_DREF_CONTROL, val);
9159 			POSTING_READ(PCH_DREF_CONTROL);
9160 			udelay(200);
9161 		}
9162 	}
9163 
9164 	BUG_ON(val != final);
9165 }
9166 
9167 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
9168 {
9169 	u32 tmp;
9170 
9171 	tmp = I915_READ(SOUTH_CHICKEN2);
9172 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
9173 	I915_WRITE(SOUTH_CHICKEN2, tmp);
9174 
9175 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
9176 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
9177 		DRM_ERROR("FDI mPHY reset assert timeout\n");
9178 
9179 	tmp = I915_READ(SOUTH_CHICKEN2);
9180 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
9181 	I915_WRITE(SOUTH_CHICKEN2, tmp);
9182 
9183 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
9184 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
9185 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
9186 }
9187 
9188 /* WaMPhyProgramming:hsw */
9189 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9190 {
9191 	u32 tmp;
9192 
9193 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9194 	tmp &= ~(0xFF << 24);
9195 	tmp |= (0x12 << 24);
9196 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9197 
9198 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9199 	tmp |= (1 << 11);
9200 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9201 
9202 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9203 	tmp |= (1 << 11);
9204 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9205 
9206 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9207 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9208 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9209 
9210 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9211 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9212 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9213 
9214 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9215 	tmp &= ~(7 << 13);
9216 	tmp |= (5 << 13);
9217 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9218 
9219 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9220 	tmp &= ~(7 << 13);
9221 	tmp |= (5 << 13);
9222 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9223 
9224 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9225 	tmp &= ~0xFF;
9226 	tmp |= 0x1C;
9227 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9228 
9229 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9230 	tmp &= ~0xFF;
9231 	tmp |= 0x1C;
9232 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9233 
9234 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9235 	tmp &= ~(0xFF << 16);
9236 	tmp |= (0x1C << 16);
9237 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
9238 
9239 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
9240 	tmp &= ~(0xFF << 16);
9241 	tmp |= (0x1C << 16);
9242 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
9243 
9244 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
9245 	tmp |= (1 << 27);
9246 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
9247 
9248 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
9249 	tmp |= (1 << 27);
9250 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
9251 
9252 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
9253 	tmp &= ~(0xF << 28);
9254 	tmp |= (4 << 28);
9255 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
9256 
9257 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
9258 	tmp &= ~(0xF << 28);
9259 	tmp |= (4 << 28);
9260 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
9261 }
9262 
9263 /* Implements 3 different sequences from BSpec chapter "Display iCLK
9264  * Programming" based on the parameters passed:
9265  * - Sequence to enable CLKOUT_DP
9266  * - Sequence to enable CLKOUT_DP without spread
9267  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
9268  */
9269 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9270 				 bool with_spread, bool with_fdi)
9271 {
9272 	u32 reg, tmp;
9273 
9274 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9275 		with_spread = true;
9276 	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9277 	    with_fdi, "LP PCH doesn't have FDI\n"))
9278 		with_fdi = false;
9279 
9280 	mutex_lock(&dev_priv->sb_lock);
9281 
9282 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9283 	tmp &= ~SBI_SSCCTL_DISABLE;
9284 	tmp |= SBI_SSCCTL_PATHALT;
9285 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9286 
9287 	udelay(24);
9288 
9289 	if (with_spread) {
9290 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9291 		tmp &= ~SBI_SSCCTL_PATHALT;
9292 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9293 
9294 		if (with_fdi) {
9295 			lpt_reset_fdi_mphy(dev_priv);
9296 			lpt_program_fdi_mphy(dev_priv);
9297 		}
9298 	}
9299 
9300 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9301 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9302 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9303 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9304 
9305 	mutex_unlock(&dev_priv->sb_lock);
9306 }
9307 
9308 /* Sequence to disable CLKOUT_DP */
9309 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9310 {
9311 	u32 reg, tmp;
9312 
9313 	mutex_lock(&dev_priv->sb_lock);
9314 
9315 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9316 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9317 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9318 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9319 
9320 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9321 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
9322 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
9323 			tmp |= SBI_SSCCTL_PATHALT;
9324 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9325 			udelay(32);
9326 		}
9327 		tmp |= SBI_SSCCTL_DISABLE;
9328 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9329 	}
9330 
9331 	mutex_unlock(&dev_priv->sb_lock);
9332 }
9333 
9334 #define BEND_IDX(steps) ((50 + (steps)) / 5)
9335 
9336 static const u16 sscdivintphase[] = {
9337 	[BEND_IDX( 50)] = 0x3B23,
9338 	[BEND_IDX( 45)] = 0x3B23,
9339 	[BEND_IDX( 40)] = 0x3C23,
9340 	[BEND_IDX( 35)] = 0x3C23,
9341 	[BEND_IDX( 30)] = 0x3D23,
9342 	[BEND_IDX( 25)] = 0x3D23,
9343 	[BEND_IDX( 20)] = 0x3E23,
9344 	[BEND_IDX( 15)] = 0x3E23,
9345 	[BEND_IDX( 10)] = 0x3F23,
9346 	[BEND_IDX(  5)] = 0x3F23,
9347 	[BEND_IDX(  0)] = 0x0025,
9348 	[BEND_IDX( -5)] = 0x0025,
9349 	[BEND_IDX(-10)] = 0x0125,
9350 	[BEND_IDX(-15)] = 0x0125,
9351 	[BEND_IDX(-20)] = 0x0225,
9352 	[BEND_IDX(-25)] = 0x0225,
9353 	[BEND_IDX(-30)] = 0x0325,
9354 	[BEND_IDX(-35)] = 0x0325,
9355 	[BEND_IDX(-40)] = 0x0425,
9356 	[BEND_IDX(-45)] = 0x0425,
9357 	[BEND_IDX(-50)] = 0x0525,
9358 };
9359 
9360 /*
9361  * Bend CLKOUT_DP
9362  * steps -50 to 50 inclusive, in steps of 5
9363  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9364  * change in clock period = -(steps / 10) * 5.787 ps
9365  */
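/*
 * Worked example: steps = -20 gives BEND_IDX(-20) = (50 - 20) / 5 = 6, so
 * sscdivintphase[6] = 0x0225 gets programmed, and the clock period grows by
 * -(-20 / 10) * 5.787 ps = +11.574 ps (the clock slows down).
 */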
9366 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9367 {
9368 	u32 tmp;
9369 	int idx = BEND_IDX(steps);
9370 
9371 	if (WARN_ON(steps % 5 != 0))
9372 		return;
9373 
9374 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9375 		return;
9376 
9377 	mutex_lock(&dev_priv->sb_lock);
9378 
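	/* Half-step bends (steps not a multiple of 10) also need the SSC dither phase */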
9379 	if (steps % 10 != 0)
9380 		tmp = 0xAAAAAAAB;
9381 	else
9382 		tmp = 0x00000000;
9383 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9384 
9385 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9386 	tmp &= 0xffff0000;
9387 	tmp |= sscdivintphase[idx];
9388 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9389 
9390 	mutex_unlock(&dev_priv->sb_lock);
9391 }
9392 
9393 #undef BEND_IDX
9394 
9395 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9396 {
9397 	u32 fuse_strap = I915_READ(FUSE_STRAP);
9398 	u32 ctl = I915_READ(SPLL_CTL);
9399 
9400 	if ((ctl & SPLL_PLL_ENABLE) == 0)
9401 		return false;
9402 
9403 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9404 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9405 		return true;
9406 
9407 	if (IS_BROADWELL(dev_priv) &&
9408 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9409 		return true;
9410 
9411 	return false;
9412 }
9413 
9414 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9415 			       enum intel_dpll_id id)
9416 {
9417 	u32 fuse_strap = I915_READ(FUSE_STRAP);
9418 	u32 ctl = I915_READ(WRPLL_CTL(id));
9419 
9420 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
9421 		return false;
9422 
9423 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9424 		return true;
9425 
9426 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9427 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9428 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9429 		return true;
9430 
9431 	return false;
9432 }
9433 
9434 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9435 {
9436 	struct intel_encoder *encoder;
9437 	bool has_fdi = false;
9438 
9439 	for_each_intel_encoder(&dev_priv->drm, encoder) {
9440 		switch (encoder->type) {
9441 		case INTEL_OUTPUT_ANALOG:
9442 			has_fdi = true;
9443 			break;
9444 		default:
9445 			break;
9446 		}
9447 	}
9448 
9449 	/*
9450 	 * The BIOS may have decided to use the PCH SSC
9451 	 * reference so we must not disable it until the
9452 	 * relevant PLLs have stopped relying on it. We'll
9453 	 * just leave the PCH SSC reference enabled in case
9454 	 * any active PLL is using it. It will get disabled
9455 	 * after runtime suspend if we don't have FDI.
9456 	 *
9457 	 * TODO: Move the whole reference clock handling
9458 	 * to the modeset sequence proper so that we can
9459 	 * actually enable/disable/reconfigure these things
9460 	 * safely. To do that we need to introduce a real
9461 	 * clock hierarchy. That would also allow us to do
9462 	 * clock bending finally.
9463 	 */
9464 	dev_priv->pch_ssc_use = 0;
9465 
9466 	if (spll_uses_pch_ssc(dev_priv)) {
9467 		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9468 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9469 	}
9470 
9471 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9472 		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9473 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9474 	}
9475 
9476 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9477 		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9478 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9479 	}
9480 
9481 	if (dev_priv->pch_ssc_use)
9482 		return;
9483 
9484 	if (has_fdi) {
9485 		lpt_bend_clkout_dp(dev_priv, 0);
9486 		lpt_enable_clkout_dp(dev_priv, true, true);
9487 	} else {
9488 		lpt_disable_clkout_dp(dev_priv);
9489 	}
9490 }
9491 
9492 /*
9493  * Initialize reference clocks when the driver loads
9494  */
9495 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9496 {
9497 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9498 		ironlake_init_pch_refclk(dev_priv);
9499 	else if (HAS_PCH_LPT(dev_priv))
9500 		lpt_init_pch_refclk(dev_priv);
9501 }
9502 
9503 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9504 {
9505 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9506 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9507 	enum pipe pipe = crtc->pipe;
9508 	u32 val;
9509 
9510 	val = 0;
9511 
9512 	switch (crtc_state->pipe_bpp) {
9513 	case 18:
9514 		val |= PIPECONF_6BPC;
9515 		break;
9516 	case 24:
9517 		val |= PIPECONF_8BPC;
9518 		break;
9519 	case 30:
9520 		val |= PIPECONF_10BPC;
9521 		break;
9522 	case 36:
9523 		val |= PIPECONF_12BPC;
9524 		break;
9525 	default:
9526 		/* Case prevented by intel_choose_pipe_bpp_dither. */
9527 		BUG();
9528 	}
9529 
9530 	if (crtc_state->dither)
9531 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9532 
9533 	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9534 		val |= PIPECONF_INTERLACED_ILK;
9535 	else
9536 		val |= PIPECONF_PROGRESSIVE;
9537 
	/*
	 * Applying limited color range to a non-RGB output would end up
	 * with an odd purple hue over the entire display. Make sure we
	 * don't do it.
	 */
9542 	WARN_ON(crtc_state->limited_color_range &&
9543 		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
9544 
9545 	if (crtc_state->limited_color_range)
9546 		val |= PIPECONF_COLOR_RANGE_SELECT;
9547 
9548 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9549 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
9550 
9551 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9552 
9553 	I915_WRITE(PIPECONF(pipe), val);
9554 	POSTING_READ(PIPECONF(pipe));
9555 }
9556 
9557 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9558 {
9559 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9560 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9561 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9562 	u32 val = 0;
9563 
9564 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
9565 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9566 
9567 	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9568 		val |= PIPECONF_INTERLACED_ILK;
9569 	else
9570 		val |= PIPECONF_PROGRESSIVE;
9571 
9572 	if (IS_HASWELL(dev_priv) &&
9573 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
9574 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
9575 
9576 	I915_WRITE(PIPECONF(cpu_transcoder), val);
9577 	POSTING_READ(PIPECONF(cpu_transcoder));
9578 }
9579 
9580 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9581 {
9582 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9583 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9584 	u32 val = 0;
9585 
9586 	switch (crtc_state->pipe_bpp) {
9587 	case 18:
9588 		val |= PIPEMISC_DITHER_6_BPC;
9589 		break;
9590 	case 24:
9591 		val |= PIPEMISC_DITHER_8_BPC;
9592 		break;
9593 	case 30:
9594 		val |= PIPEMISC_DITHER_10_BPC;
9595 		break;
9596 	case 36:
9597 		val |= PIPEMISC_DITHER_12_BPC;
9598 		break;
9599 	default:
9600 		MISSING_CASE(crtc_state->pipe_bpp);
9601 		break;
9602 	}
9603 
9604 	if (crtc_state->dither)
9605 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9606 
9607 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9608 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9609 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9610 
9611 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9612 		val |= PIPEMISC_YUV420_ENABLE |
9613 			PIPEMISC_YUV420_MODE_FULL_BLEND;
9614 
9615 	if (INTEL_GEN(dev_priv) >= 11 &&
9616 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9617 					   BIT(PLANE_CURSOR))) == 0)
9618 		val |= PIPEMISC_HDR_MODE_PRECISION;
9619 
9620 	I915_WRITE(PIPEMISC(crtc->pipe), val);
9621 }
9622 
9623 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9624 {
9625 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9626 	u32 tmp;
9627 
9628 	tmp = I915_READ(PIPEMISC(crtc->pipe));
9629 
9630 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9631 	case PIPEMISC_DITHER_6_BPC:
9632 		return 18;
9633 	case PIPEMISC_DITHER_8_BPC:
9634 		return 24;
9635 	case PIPEMISC_DITHER_10_BPC:
9636 		return 30;
9637 	case PIPEMISC_DITHER_12_BPC:
9638 		return 36;
9639 	default:
9640 		MISSING_CASE(tmp);
9641 		return 0;
9642 	}
9643 }
9644 
9645 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9646 {
9647 	/*
9648 	 * Account for spread spectrum to avoid
9649 	 * oversubscribing the link. Max center spread
9650 	 * is 2.5%; use 5% for safety's sake.
9651 	 */
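	/*
	 * e.g., assuming clocks in kHz: a 148500 kHz, 24 bpp mode on a
	 * 270000 kHz FDI link gives bps = 148500 * 24 * 21 / 20 = 3742200;
	 * each lane carries 270000 * 8 = 2160000, so 2 lanes are required.
	 */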
9652 	u32 bps = target_clock * bpp * 21 / 20;
9653 	return DIV_ROUND_UP(bps, link_bw * 8);
9654 }
9655 
9656 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9657 {
9658 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9659 }
9660 
9661 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9662 				  struct intel_crtc_state *crtc_state,
9663 				  struct dpll *reduced_clock)
9664 {
9665 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9666 	u32 dpll, fp, fp2;
9667 	int factor;
9668 
9669 	/* Enable autotuning of the PLL clock (if permissible) */
9670 	factor = 21;
9671 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9672 		if ((intel_panel_use_ssc(dev_priv) &&
9673 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
9674 		    (HAS_PCH_IBX(dev_priv) &&
9675 		     intel_is_dual_link_lvds(dev_priv)))
9676 			factor = 25;
9677 	} else if (crtc_state->sdvo_tv_clock) {
9678 		factor = 20;
9679 	}
9680 
9681 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9682 
9683 	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9684 		fp |= FP_CB_TUNE;
9685 
9686 	if (reduced_clock) {
9687 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
9688 
9689 		if (reduced_clock->m < factor * reduced_clock->n)
9690 			fp2 |= FP_CB_TUNE;
9691 	} else {
9692 		fp2 = fp;
9693 	}
9694 
9695 	dpll = 0;
9696 
9697 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9698 		dpll |= DPLLB_MODE_LVDS;
9699 	else
9700 		dpll |= DPLLB_MODE_DAC_SERIAL;
9701 
9702 	dpll |= (crtc_state->pixel_multiplier - 1)
9703 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9704 
9705 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9706 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9707 		dpll |= DPLL_SDVO_HIGH_SPEED;
9708 
9709 	if (intel_crtc_has_dp_encoder(crtc_state))
9710 		dpll |= DPLL_SDVO_HIGH_SPEED;
9711 
9712 	/*
9713 	 * The high speed IO clock is only really required for
9714 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9715 	 * possible to share the DPLL between CRT and HDMI. Enabling
9716 	 * the clock needlessly does no real harm, except use up a
9717 	 * bit of power potentially.
9718 	 *
9719 	 * We'll limit this to IVB with 3 pipes, since it has only two
9720 	 * DPLLs and so DPLL sharing is the only way to get three pipes
9721 	 * driving PCH ports at the same time. On SNB we could do this,
9722 	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
9724 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9725 	 */
9726 	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
9727 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9728 		dpll |= DPLL_SDVO_HIGH_SPEED;
9729 
9730 	/* compute bitmask from p1 value */
9731 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9732 	/* also FPA1 */
9733 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
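	/* e.g. p1 == 3 sets bit 2 of both the FPA0 and FPA1 post divider fields */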
9734 
9735 	switch (crtc_state->dpll.p2) {
9736 	case 5:
9737 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9738 		break;
9739 	case 7:
9740 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9741 		break;
9742 	case 10:
9743 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9744 		break;
9745 	case 14:
9746 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9747 		break;
9748 	}
9749 
9750 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9751 	    intel_panel_use_ssc(dev_priv))
9752 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9753 	else
9754 		dpll |= PLL_REF_INPUT_DREFCLK;
9755 
9756 	dpll |= DPLL_VCO_ENABLE;
9757 
9758 	crtc_state->dpll_hw_state.dpll = dpll;
9759 	crtc_state->dpll_hw_state.fp0 = fp;
9760 	crtc_state->dpll_hw_state.fp1 = fp2;
9761 }
9762 
9763 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9764 				       struct intel_crtc_state *crtc_state)
9765 {
9766 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9767 	struct intel_atomic_state *state =
9768 		to_intel_atomic_state(crtc_state->base.state);
9769 	const struct intel_limit *limit;
9770 	int refclk = 120000;
9771 
9772 	memset(&crtc_state->dpll_hw_state, 0,
9773 	       sizeof(crtc_state->dpll_hw_state));
9774 
9775 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9776 	if (!crtc_state->has_pch_encoder)
9777 		return 0;
9778 
9779 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9780 		if (intel_panel_use_ssc(dev_priv)) {
9781 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9782 				      dev_priv->vbt.lvds_ssc_freq);
9783 			refclk = dev_priv->vbt.lvds_ssc_freq;
9784 		}
9785 
9786 		if (intel_is_dual_link_lvds(dev_priv)) {
9787 			if (refclk == 100000)
9788 				limit = &intel_limits_ironlake_dual_lvds_100m;
9789 			else
9790 				limit = &intel_limits_ironlake_dual_lvds;
9791 		} else {
9792 			if (refclk == 100000)
9793 				limit = &intel_limits_ironlake_single_lvds_100m;
9794 			else
9795 				limit = &intel_limits_ironlake_single_lvds;
9796 		}
9797 	} else {
9798 		limit = &intel_limits_ironlake_dac;
9799 	}
9800 
9801 	if (!crtc_state->clock_set &&
9802 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9803 				refclk, NULL, &crtc_state->dpll)) {
9804 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
9805 		return -EINVAL;
9806 	}
9807 
9808 	ironlake_compute_dpll(crtc, crtc_state, NULL);
9809 
9810 	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
9811 		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9812 			      pipe_name(crtc->pipe));
9813 		return -EINVAL;
9814 	}
9815 
9816 	return 0;
9817 }
9818 
9819 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9820 					 struct intel_link_m_n *m_n)
9821 {
9822 	struct drm_device *dev = crtc->base.dev;
9823 	struct drm_i915_private *dev_priv = to_i915(dev);
9824 	enum pipe pipe = crtc->pipe;
9825 
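	/* The data M register packs the TU size (minus one) in its high bits */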
9826 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9827 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9828 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9829 		& ~TU_SIZE_MASK;
9830 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9831 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9832 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9833 }
9834 
9835 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9836 					 enum transcoder transcoder,
9837 					 struct intel_link_m_n *m_n,
9838 					 struct intel_link_m_n *m2_n2)
9839 {
9840 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9841 	enum pipe pipe = crtc->pipe;
9842 
9843 	if (INTEL_GEN(dev_priv) >= 5) {
9844 		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9845 		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9846 		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9847 			& ~TU_SIZE_MASK;
9848 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9849 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9850 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9851 
9852 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9853 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9858 			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9859 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9860 		}
9861 	} else {
9862 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9863 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9864 		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9865 			& ~TU_SIZE_MASK;
9866 		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9867 		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9868 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9869 	}
9870 }
9871 
9872 void intel_dp_get_m_n(struct intel_crtc *crtc,
9873 		      struct intel_crtc_state *pipe_config)
9874 {
9875 	if (pipe_config->has_pch_encoder)
9876 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9877 	else
9878 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9879 					     &pipe_config->dp_m_n,
9880 					     &pipe_config->dp_m2_n2);
9881 }
9882 
9883 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9884 					struct intel_crtc_state *pipe_config)
9885 {
9886 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9887 				     &pipe_config->fdi_m_n, NULL);
9888 }
9889 
9890 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9891 				    struct intel_crtc_state *pipe_config)
9892 {
9893 	struct drm_device *dev = crtc->base.dev;
9894 	struct drm_i915_private *dev_priv = to_i915(dev);
9895 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9896 	u32 ps_ctrl = 0;
9897 	int id = -1;
9898 	int i;
9899 
9900 	/* find scaler attached to this pipe */
9901 	for (i = 0; i < crtc->num_scalers; i++) {
9902 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9903 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9904 			id = i;
9905 			pipe_config->pch_pfit.enabled = true;
9906 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9907 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9908 			scaler_state->scalers[i].in_use = true;
9909 			break;
9910 		}
9911 	}
9912 
9913 	scaler_state->scaler_id = id;
9914 	if (id >= 0) {
9915 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9916 	} else {
9917 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9918 	}
9919 }
9920 
9921 static void
9922 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9923 				 struct intel_initial_plane_config *plane_config)
9924 {
9925 	struct drm_device *dev = crtc->base.dev;
9926 	struct drm_i915_private *dev_priv = to_i915(dev);
9927 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9928 	enum plane_id plane_id = plane->id;
9929 	enum pipe pipe;
9930 	u32 val, base, offset, stride_mult, tiling, alpha;
9931 	int fourcc, pixel_format;
9932 	unsigned int aligned_height;
9933 	struct drm_framebuffer *fb;
9934 	struct intel_framebuffer *intel_fb;
9935 
9936 	if (!plane->get_hw_state(plane, &pipe))
9937 		return;
9938 
9939 	WARN_ON(pipe != crtc->pipe);
9940 
9941 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9942 	if (!intel_fb) {
9943 		DRM_DEBUG_KMS("failed to alloc fb\n");
9944 		return;
9945 	}
9946 
9947 	fb = &intel_fb->base;
9948 
9949 	fb->dev = dev;
9950 
9951 	val = I915_READ(PLANE_CTL(pipe, plane_id));
9952 
9953 	if (INTEL_GEN(dev_priv) >= 11)
9954 		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9955 	else
9956 		pixel_format = val & PLANE_CTL_FORMAT_MASK;
9957 
9958 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9959 		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9960 		alpha &= PLANE_COLOR_ALPHA_MASK;
9961 	} else {
9962 		alpha = val & PLANE_CTL_ALPHA_MASK;
9963 	}
9964 
9965 	fourcc = skl_format_to_fourcc(pixel_format,
9966 				      val & PLANE_CTL_ORDER_RGBX, alpha);
9967 	fb->format = drm_format_info(fourcc);
9968 
9969 	tiling = val & PLANE_CTL_TILED_MASK;
9970 	switch (tiling) {
9971 	case PLANE_CTL_TILED_LINEAR:
9972 		fb->modifier = DRM_FORMAT_MOD_LINEAR;
9973 		break;
9974 	case PLANE_CTL_TILED_X:
9975 		plane_config->tiling = I915_TILING_X;
9976 		fb->modifier = I915_FORMAT_MOD_X_TILED;
9977 		break;
9978 	case PLANE_CTL_TILED_Y:
9979 		plane_config->tiling = I915_TILING_Y;
9980 		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9981 			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9982 		else
9983 			fb->modifier = I915_FORMAT_MOD_Y_TILED;
9984 		break;
9985 	case PLANE_CTL_TILED_YF:
9986 		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9987 			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9988 		else
9989 			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9990 		break;
9991 	default:
9992 		MISSING_CASE(tiling);
9993 		goto error;
9994 	}
9995 
9996 	/*
	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
	 * while i915 HW rotation is clockwise; that's why the 90 and 270
	 * degree values are swapped below.
9999 	 */
10000 	switch (val & PLANE_CTL_ROTATE_MASK) {
10001 	case PLANE_CTL_ROTATE_0:
10002 		plane_config->rotation = DRM_MODE_ROTATE_0;
10003 		break;
10004 	case PLANE_CTL_ROTATE_90:
10005 		plane_config->rotation = DRM_MODE_ROTATE_270;
10006 		break;
10007 	case PLANE_CTL_ROTATE_180:
10008 		plane_config->rotation = DRM_MODE_ROTATE_180;
10009 		break;
10010 	case PLANE_CTL_ROTATE_270:
10011 		plane_config->rotation = DRM_MODE_ROTATE_90;
10012 		break;
10013 	}
10014 
10015 	if (INTEL_GEN(dev_priv) >= 10 &&
10016 	    val & PLANE_CTL_FLIP_HORIZONTAL)
10017 		plane_config->rotation |= DRM_MODE_REFLECT_X;
10018 
10019 	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
10020 	plane_config->base = base;
10021 
10022 	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
10023 
10024 	val = I915_READ(PLANE_SIZE(pipe, plane_id));
10025 	fb->height = ((val >> 16) & 0xffff) + 1;
10026 	fb->width = ((val >> 0) & 0xffff) + 1;
10027 
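	/* The stride register counts in skl_plane_stride_mult() sized units; convert to bytes */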
10028 	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
10029 	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
10030 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
10031 
10032 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
10033 
10034 	plane_config->size = fb->pitches[0] * aligned_height;
10035 
10036 	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
10037 		      crtc->base.name, plane->base.name, fb->width, fb->height,
10038 		      fb->format->cpp[0] * 8, base, fb->pitches[0],
10039 		      plane_config->size);
10040 
10041 	plane_config->fb = intel_fb;
10042 	return;
10043 
10044 error:
10045 	kfree(intel_fb);
10046 }
10047 
10048 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
10049 				     struct intel_crtc_state *pipe_config)
10050 {
10051 	struct drm_device *dev = crtc->base.dev;
10052 	struct drm_i915_private *dev_priv = to_i915(dev);
10053 	u32 tmp;
10054 
10055 	tmp = I915_READ(PF_CTL(crtc->pipe));
10056 
10057 	if (tmp & PF_ENABLE) {
10058 		pipe_config->pch_pfit.enabled = true;
10059 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
10060 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
10061 
		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * would differentiate them) so just WARN about this case for now. */
10065 		if (IS_GEN(dev_priv, 7)) {
10066 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
10067 				PF_PIPE_SEL_IVB(crtc->pipe));
10068 		}
10069 	}
10070 }
10071 
10072 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
10073 				     struct intel_crtc_state *pipe_config)
10074 {
10075 	struct drm_device *dev = crtc->base.dev;
10076 	struct drm_i915_private *dev_priv = to_i915(dev);
10077 	enum intel_display_power_domain power_domain;
10078 	intel_wakeref_t wakeref;
10079 	u32 tmp;
10080 	bool ret;
10081 
10082 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10083 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10084 	if (!wakeref)
10085 		return false;
10086 
10087 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10088 	pipe_config->shared_dpll = NULL;
10089 	pipe_config->master_transcoder = INVALID_TRANSCODER;
10090 
10091 	ret = false;
10092 	tmp = I915_READ(PIPECONF(crtc->pipe));
10093 	if (!(tmp & PIPECONF_ENABLE))
10094 		goto out;
10095 
10096 	switch (tmp & PIPECONF_BPC_MASK) {
10097 	case PIPECONF_6BPC:
10098 		pipe_config->pipe_bpp = 18;
10099 		break;
10100 	case PIPECONF_8BPC:
10101 		pipe_config->pipe_bpp = 24;
10102 		break;
10103 	case PIPECONF_10BPC:
10104 		pipe_config->pipe_bpp = 30;
10105 		break;
10106 	case PIPECONF_12BPC:
10107 		pipe_config->pipe_bpp = 36;
10108 		break;
10109 	default:
10110 		break;
10111 	}
10112 
10113 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10114 		pipe_config->limited_color_range = true;
10115 
10116 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10117 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10118 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10119 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10120 		break;
10121 	default:
10122 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10123 		break;
10124 	}
10125 
10126 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10127 		PIPECONF_GAMMA_MODE_SHIFT;
10128 
10129 	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10130 
10131 	i9xx_get_pipe_color_config(pipe_config);
10132 	intel_color_get_config(pipe_config);
10133 
10134 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10135 		struct intel_shared_dpll *pll;
10136 		enum intel_dpll_id pll_id;
10137 
10138 		pipe_config->has_pch_encoder = true;
10139 
10140 		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
10141 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10142 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10143 
10144 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
10145 
10146 		if (HAS_PCH_IBX(dev_priv)) {
10147 			/*
10148 			 * The pipe->pch transcoder and pch transcoder->pll
10149 			 * mapping is fixed.
10150 			 */
10151 			pll_id = (enum intel_dpll_id) crtc->pipe;
10152 		} else {
10153 			tmp = I915_READ(PCH_DPLL_SEL);
10154 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10155 				pll_id = DPLL_ID_PCH_PLL_B;
10156 			else
				pll_id = DPLL_ID_PCH_PLL_A;
10158 		}
10159 
10160 		pipe_config->shared_dpll =
10161 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
10162 		pll = pipe_config->shared_dpll;
10163 
10164 		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10165 						&pipe_config->dpll_hw_state));
10166 
10167 		tmp = pipe_config->dpll_hw_state.dpll;
10168 		pipe_config->pixel_multiplier =
10169 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10170 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10171 
10172 		ironlake_pch_clock_get(crtc, pipe_config);
10173 	} else {
10174 		pipe_config->pixel_multiplier = 1;
10175 	}
10176 
10177 	intel_get_pipe_timings(crtc, pipe_config);
10178 	intel_get_pipe_src_size(crtc, pipe_config);
10179 
10180 	ironlake_get_pfit_config(crtc, pipe_config);
10181 
10182 	ret = true;
10183 
10184 out:
10185 	intel_display_power_put(dev_priv, power_domain, wakeref);
10186 
10187 	return ret;
}

static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
10190 				      struct intel_crtc_state *crtc_state)
10191 {
10192 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10193 	struct intel_atomic_state *state =
10194 		to_intel_atomic_state(crtc_state->base.state);
10195 
10196 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10197 	    INTEL_GEN(dev_priv) >= 11) {
10198 		struct intel_encoder *encoder =
10199 			intel_get_crtc_new_encoder(state, crtc_state);
10200 
10201 		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10202 			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
10203 				      pipe_name(crtc->pipe));
10204 			return -EINVAL;
10205 		}
10206 	}
10207 
10208 	return 0;
10209 }
10210 
10211 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
10212 				   enum port port,
10213 				   struct intel_crtc_state *pipe_config)
10214 {
10215 	enum intel_dpll_id id;
10216 	u32 temp;
10217 
10218 	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10219 	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10220 
10221 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10222 		return;
10223 
10224 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10225 }
10226 
10227 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
10228 				enum port port,
10229 				struct intel_crtc_state *pipe_config)
10230 {
10231 	enum phy phy = intel_port_to_phy(dev_priv, port);
10232 	enum icl_port_dpll_id port_dpll_id;
10233 	enum intel_dpll_id id;
10234 	u32 temp;
10235 
10236 	if (intel_phy_is_combo(dev_priv, phy)) {
10237 		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
10238 			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
10239 		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
10240 		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10241 	} else if (intel_phy_is_tc(dev_priv, phy)) {
10242 		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
10243 
10244 		if (clk_sel == DDI_CLK_SEL_MG) {
10245 			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
10246 								    port));
10247 			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
10248 		} else {
10249 			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
10250 			id = DPLL_ID_ICL_TBTPLL;
10251 			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
10252 		}
10253 	} else {
10254 		WARN(1, "Invalid port %x\n", port);
10255 		return;
10256 	}
10257 
10258 	pipe_config->icl_port_dplls[port_dpll_id].pll =
10259 		intel_get_shared_dpll_by_id(dev_priv, id);
10260 
10261 	icl_set_active_port_dpll(pipe_config, port_dpll_id);
10262 }
10263 
10264 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10265 				enum port port,
10266 				struct intel_crtc_state *pipe_config)
10267 {
10268 	enum intel_dpll_id id;
10269 
10270 	switch (port) {
10271 	case PORT_A:
10272 		id = DPLL_ID_SKL_DPLL0;
10273 		break;
10274 	case PORT_B:
10275 		id = DPLL_ID_SKL_DPLL1;
10276 		break;
10277 	case PORT_C:
10278 		id = DPLL_ID_SKL_DPLL2;
10279 		break;
10280 	default:
10281 		DRM_ERROR("Incorrect port type\n");
10282 		return;
10283 	}
10284 
10285 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10286 }
10287 
10288 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
10289 				enum port port,
10290 				struct intel_crtc_state *pipe_config)
10291 {
10292 	enum intel_dpll_id id;
10293 	u32 temp;
10294 
10295 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
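	/* The 2-bit PLL select field for each port starts at bit (port * 3 + 1) */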
10296 	id = temp >> (port * 3 + 1);
10297 
10298 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10299 		return;
10300 
10301 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10302 }
10303 
10304 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
10305 				enum port port,
10306 				struct intel_crtc_state *pipe_config)
10307 {
10308 	enum intel_dpll_id id;
10309 	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
10310 
10311 	switch (ddi_pll_sel) {
10312 	case PORT_CLK_SEL_WRPLL1:
10313 		id = DPLL_ID_WRPLL1;
10314 		break;
10315 	case PORT_CLK_SEL_WRPLL2:
10316 		id = DPLL_ID_WRPLL2;
10317 		break;
10318 	case PORT_CLK_SEL_SPLL:
10319 		id = DPLL_ID_SPLL;
10320 		break;
10321 	case PORT_CLK_SEL_LCPLL_810:
10322 		id = DPLL_ID_LCPLL_810;
10323 		break;
10324 	case PORT_CLK_SEL_LCPLL_1350:
10325 		id = DPLL_ID_LCPLL_1350;
10326 		break;
10327 	case PORT_CLK_SEL_LCPLL_2700:
10328 		id = DPLL_ID_LCPLL_2700;
10329 		break;
10330 	default:
10331 		MISSING_CASE(ddi_pll_sel);
10332 		/* fall through */
10333 	case PORT_CLK_SEL_NONE:
10334 		return;
10335 	}
10336 
10337 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10338 }
10339 
10340 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
10341 				     struct intel_crtc_state *pipe_config,
10342 				     u64 *power_domain_mask,
10343 				     intel_wakeref_t *wakerefs)
10344 {
10345 	struct drm_device *dev = crtc->base.dev;
10346 	struct drm_i915_private *dev_priv = to_i915(dev);
10347 	enum intel_display_power_domain power_domain;
10348 	unsigned long panel_transcoder_mask = 0;
10349 	unsigned long enabled_panel_transcoders = 0;
10350 	enum transcoder panel_transcoder;
10351 	intel_wakeref_t wf;
10352 	u32 tmp;
10353 
10354 	if (INTEL_GEN(dev_priv) >= 11)
10355 		panel_transcoder_mask |=
10356 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
10357 
10358 	if (HAS_TRANSCODER_EDP(dev_priv))
10359 		panel_transcoder_mask |= BIT(TRANSCODER_EDP);
10360 
10361 	/*
10362 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
10363 	 * and DSI transcoders handled below.
10364 	 */
10365 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10366 
10367 	/*
10368 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in the always-on power
	 * well).
10370 	 */
10371 	for_each_set_bit(panel_transcoder,
10372 			 &panel_transcoder_mask,
10373 			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
10374 		bool force_thru = false;
10375 		enum pipe trans_pipe;
10376 
10377 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
10378 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
10379 			continue;
10380 
10381 		/*
10382 		 * Log all enabled ones, only use the first one.
10383 		 *
10384 		 * FIXME: This won't work for two separate DSI displays.
10385 		 */
10386 		enabled_panel_transcoders |= BIT(panel_transcoder);
10387 		if (enabled_panel_transcoders != BIT(panel_transcoder))
10388 			continue;
10389 
10390 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10391 		default:
10392 			WARN(1, "unknown pipe linked to transcoder %s\n",
10393 			     transcoder_name(panel_transcoder));
10394 			/* fall through */
10395 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
10396 			force_thru = true;
10397 			/* fall through */
10398 		case TRANS_DDI_EDP_INPUT_A_ON:
10399 			trans_pipe = PIPE_A;
10400 			break;
10401 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
10402 			trans_pipe = PIPE_B;
10403 			break;
10404 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
10405 			trans_pipe = PIPE_C;
10406 			break;
10407 		}
10408 
10409 		if (trans_pipe == crtc->pipe) {
10410 			pipe_config->cpu_transcoder = panel_transcoder;
10411 			pipe_config->pch_pfit.force_thru = force_thru;
10412 		}
10413 	}
10414 
10415 	/*
10416 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10417 	 */
10418 	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10419 		enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10420 
10421 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10422 	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10423 
10424 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10425 	if (!wf)
10426 		return false;
10427 
10428 	wakerefs[power_domain] = wf;
10429 	*power_domain_mask |= BIT_ULL(power_domain);
10430 
10431 	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10432 
10433 	return tmp & PIPECONF_ENABLE;
10434 }
10435 
10436 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10437 					 struct intel_crtc_state *pipe_config,
10438 					 u64 *power_domain_mask,
10439 					 intel_wakeref_t *wakerefs)
10440 {
10441 	struct drm_device *dev = crtc->base.dev;
10442 	struct drm_i915_private *dev_priv = to_i915(dev);
10443 	enum intel_display_power_domain power_domain;
10444 	enum transcoder cpu_transcoder;
10445 	intel_wakeref_t wf;
10446 	enum port port;
10447 	u32 tmp;
10448 
10449 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10450 		if (port == PORT_A)
10451 			cpu_transcoder = TRANSCODER_DSI_A;
10452 		else
10453 			cpu_transcoder = TRANSCODER_DSI_C;
10454 
10455 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10456 		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10457 
10458 		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10459 		if (!wf)
10460 			continue;
10461 
10462 		wakerefs[power_domain] = wf;
10463 		*power_domain_mask |= BIT_ULL(power_domain);
10464 
10465 		/*
10466 		 * The PLL needs to be enabled with a valid divider
10467 		 * configuration, otherwise accessing DSI registers will hang
10468 		 * the machine. See BSpec North Display Engine
10469 		 * registers/MIPI[BXT]. We can break out here early, since we
10470 		 * need the same DSI PLL to be enabled for both DSI ports.
10471 		 */
10472 		if (!bxt_dsi_pll_is_enabled(dev_priv))
10473 			break;
10474 
10475 		/* XXX: this works for video mode only */
10476 		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10477 		if (!(tmp & DPI_ENABLE))
10478 			continue;
10479 
10480 		tmp = I915_READ(MIPI_CTRL(port));
10481 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10482 			continue;
10483 
10484 		pipe_config->cpu_transcoder = cpu_transcoder;
10485 		break;
10486 	}
10487 
10488 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
10489 }
10490 
10491 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10492 				       struct intel_crtc_state *pipe_config)
10493 {
10494 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10495 	struct intel_shared_dpll *pll;
10496 	enum port port;
10497 	u32 tmp;
10498 
10499 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10500 
10501 	if (INTEL_GEN(dev_priv) >= 12)
10502 		port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
10503 	else
10504 		port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
10505 
10506 	if (INTEL_GEN(dev_priv) >= 11)
10507 		icelake_get_ddi_pll(dev_priv, port, pipe_config);
10508 	else if (IS_CANNONLAKE(dev_priv))
10509 		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10510 	else if (IS_GEN9_BC(dev_priv))
10511 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
10512 	else if (IS_GEN9_LP(dev_priv))
10513 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
10514 	else
10515 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
10516 
10517 	pll = pipe_config->shared_dpll;
10518 	if (pll) {
10519 		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10520 						&pipe_config->dpll_hw_state));
10521 	}
10522 
10523 	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
	 * So just check whether this pipe is wired to DDI E and whether
10526 	 * the PCH transcoder is on.
10527 	 */
10528 	if (INTEL_GEN(dev_priv) < 9 &&
10529 	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10530 		pipe_config->has_pch_encoder = true;
10531 
10532 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10533 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10534 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10535 
10536 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
10537 	}
10538 }
10539 
10540 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
10541 						 enum transcoder cpu_transcoder)
10542 {
10543 	u32 trans_port_sync, master_select;
10544 
10545 	trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
10546 
10547 	if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
10548 		return INVALID_TRANSCODER;
10549 
10550 	master_select = trans_port_sync &
10551 			PORT_SYNC_MODE_MASTER_SELECT_MASK;
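	/*
	 * MASTER_SELECT == 0 denotes the eDP transcoder; other values map
	 * to transcoder A onwards, offset by one.
	 */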
10552 	if (master_select == 0)
10553 		return TRANSCODER_EDP;
10554 	else
10555 		return master_select - 1;
10556 }
10557 
10558 static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
10559 {
10560 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
10561 	u32 transcoders;
10562 	enum transcoder cpu_transcoder;
10563 
10564 	crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
10565 								  crtc_state->cpu_transcoder);
10566 
10567 	transcoders = BIT(TRANSCODER_A) |
10568 		BIT(TRANSCODER_B) |
10569 		BIT(TRANSCODER_C) |
10570 		BIT(TRANSCODER_D);
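	/*
	 * Any enabled transcoder whose master select reads back as our
	 * transcoder is one of our sync-mode slaves.
	 */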
10571 	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
10572 		enum intel_display_power_domain power_domain;
10573 		intel_wakeref_t trans_wakeref;
10574 
10575 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10576 		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
10577 								   power_domain);
10578 
10579 		if (!trans_wakeref)
10580 			continue;
10581 
10582 		if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
10583 		    crtc_state->cpu_transcoder)
10584 			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
10585 
10586 		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
10587 	}
10588 
10589 	WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
10590 		crtc_state->sync_mode_slaves_mask);
10591 }
10592 
10593 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10594 				    struct intel_crtc_state *pipe_config)
10595 {
10596 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10597 	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10598 	enum intel_display_power_domain power_domain;
10599 	u64 power_domain_mask;
10600 	bool active;
10601 
10602 	intel_crtc_init_scalers(crtc, pipe_config);
10603 
10604 	pipe_config->master_transcoder = INVALID_TRANSCODER;
10605 
10606 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10607 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10608 	if (!wf)
10609 		return false;
10610 
10611 	wakerefs[power_domain] = wf;
10612 	power_domain_mask = BIT_ULL(power_domain);
10613 
10614 	pipe_config->shared_dpll = NULL;
10615 
10616 	active = hsw_get_transcoder_state(crtc, pipe_config,
10617 					  &power_domain_mask, wakerefs);
10618 
10619 	if (IS_GEN9_LP(dev_priv) &&
10620 	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
10621 					 &power_domain_mask, wakerefs)) {
10622 		WARN_ON(active);
10623 		active = true;
10624 	}
10625 
10626 	if (!active)
10627 		goto out;
10628 
10629 	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10630 	    INTEL_GEN(dev_priv) >= 11) {
10631 		haswell_get_ddi_port_state(crtc, pipe_config);
10632 		intel_get_pipe_timings(crtc, pipe_config);
10633 	}
10634 
10635 	intel_get_pipe_src_size(crtc, pipe_config);
10636 
10637 	if (IS_HASWELL(dev_priv)) {
10638 		u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10639 
10640 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
10641 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10642 		else
10643 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10644 	} else {
10645 		pipe_config->output_format =
10646 			bdw_get_pipemisc_output_format(crtc);
10647 
10648 		/*
10649 		 * Currently there is no interface defined to
10650 		 * check user preference between RGB/YCBCR444
10651 		 * or YCBCR420. So the only possible case for
10652 		 * YCBCR444 usage is driving YCBCR420 output
10653 		 * with LSPCON, when pipe is configured for
10654 		 * YCBCR444 output and LSPCON takes care of
10655 		 * downsampling it.
10656 		 */
10657 		pipe_config->lspcon_downsampling =
10658 			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
10659 	}
10660 
10661 	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10662 
10663 	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10664 
10665 	if (INTEL_GEN(dev_priv) >= 9) {
10666 		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10667 
10668 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10669 			pipe_config->gamma_enable = true;
10670 
10671 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10672 			pipe_config->csc_enable = true;
10673 	} else {
10674 		i9xx_get_pipe_color_config(pipe_config);
10675 	}
10676 
10677 	intel_color_get_config(pipe_config);
10678 
10679 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10680 	WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10681 
10682 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10683 	if (wf) {
10684 		wakerefs[power_domain] = wf;
10685 		power_domain_mask |= BIT_ULL(power_domain);
10686 
10687 		if (INTEL_GEN(dev_priv) >= 9)
10688 			skylake_get_pfit_config(crtc, pipe_config);
10689 		else
10690 			ironlake_get_pfit_config(crtc, pipe_config);
10691 	}
10692 
10693 	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv)) {
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		} else {
10697 			/*
			 * We cannot read out the IPS state on Broadwell; set
			 * it to true so we can set it to a defined state on
			 * the first commit.
10701 			 */
10702 			pipe_config->ips_enabled = true;
10703 		}
10704 	}
10705 
10706 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10707 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10708 		pipe_config->pixel_multiplier =
10709 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10710 	} else {
10711 		pipe_config->pixel_multiplier = 1;
10712 	}
10713 
10714 	if (INTEL_GEN(dev_priv) >= 11 &&
10715 	    !transcoder_is_dsi(pipe_config->cpu_transcoder))
10716 		icelake_get_trans_port_sync_config(pipe_config);
10717 
10718 out:
10719 	for_each_power_domain(power_domain, power_domain_mask)
10720 		intel_display_power_put(dev_priv,
10721 					power_domain, wakerefs[power_domain]);
10722 
10723 	return active;
10724 }
10725 
10726 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10727 {
10728 	struct drm_i915_private *dev_priv =
10729 		to_i915(plane_state->base.plane->dev);
10730 	const struct drm_framebuffer *fb = plane_state->base.fb;
10731 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10732 	u32 base;
10733 
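	/*
	 * Platforms with cursor_needs_physical scan the cursor out of a
	 * physically contiguous buffer; everything else goes through the GGTT.
	 */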
10734 	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10735 		base = obj->phys_handle->busaddr;
10736 	else
10737 		base = intel_plane_ggtt_offset(plane_state);
10738 
10739 	return base + plane_state->color_plane[0].offset;
10740 }
10741 
10742 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10743 {
10744 	int x = plane_state->base.dst.x1;
10745 	int y = plane_state->base.dst.y1;
10746 	u32 pos = 0;
10747 
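	/* CURPOS is sign-magnitude: a sign bit plus the absolute value per axis */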
10748 	if (x < 0) {
10749 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10750 		x = -x;
10751 	}
10752 	pos |= x << CURSOR_X_SHIFT;
10753 
10754 	if (y < 0) {
10755 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10756 		y = -y;
10757 	}
10758 	pos |= y << CURSOR_Y_SHIFT;
10759 
10760 	return pos;
10761 }
10762 
10763 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10764 {
10765 	const struct drm_mode_config *config =
10766 		&plane_state->base.plane->dev->mode_config;
10767 	int width = drm_rect_width(&plane_state->base.dst);
10768 	int height = drm_rect_height(&plane_state->base.dst);
10769 
10770 	return width > 0 && width <= config->cursor_width &&
10771 		height > 0 && height <= config->cursor_height;
10772 }
10773 
10774 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10775 {
10776 	struct drm_i915_private *dev_priv =
10777 		to_i915(plane_state->base.plane->dev);
10778 	unsigned int rotation = plane_state->base.rotation;
10779 	int src_x, src_y;
10780 	u32 offset;
10781 	int ret;
10782 
10783 	ret = intel_plane_compute_gtt(plane_state);
10784 	if (ret)
10785 		return ret;
10786 
10787 	if (!plane_state->base.visible)
10788 		return 0;
10789 
10790 	src_x = plane_state->base.src.x1 >> 16;
10791 	src_y = plane_state->base.src.y1 >> 16;
10792 
10793 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10794 	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10795 						    plane_state, 0);
10796 
10797 	if (src_x != 0 || src_y != 0) {
10798 		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10799 		return -EINVAL;
10800 	}
10801 
10802 	/*
10803 	 * Put the final coordinates back so that the src
10804 	 * coordinate checks will see the right values.
10805 	 */
10806 	drm_rect_translate_to(&plane_state->base.src,
10807 			      src_x << 16, src_y << 16);
10808 
10809 	/* ILK+ do this automagically in hardware */
10810 	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
10811 		const struct drm_framebuffer *fb = plane_state->base.fb;
10812 		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
10813 		int src_h = drm_rect_height(&plane_state->base.src) >> 16;
10814 
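		/*
		 * Point the base at the last pixel: with 180 degree rotation
		 * the scanout effectively walks the buffer backwards.
		 */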
10815 		offset += (src_h * src_w - 1) * fb->format->cpp[0];
10816 	}
10817 
10818 	plane_state->color_plane[0].offset = offset;
10819 	plane_state->color_plane[0].x = src_x;
10820 	plane_state->color_plane[0].y = src_y;
10821 
10822 	return 0;
10823 }
10824 
10825 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10826 			      struct intel_plane_state *plane_state)
10827 {
10828 	const struct drm_framebuffer *fb = plane_state->base.fb;
10829 	int ret;
10830 
10831 	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10832 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
10833 		return -EINVAL;
10834 	}
10835 
10836 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10837 						  &crtc_state->base,
10838 						  DRM_PLANE_HELPER_NO_SCALING,
10839 						  DRM_PLANE_HELPER_NO_SCALING,
10840 						  true, true);
10841 	if (ret)
10842 		return ret;
10843 
10844 	/* Use the unclipped src/dst rectangles, which we program to hw */
10845 	plane_state->base.src = drm_plane_state_src(&plane_state->base);
10846 	plane_state->base.dst = drm_plane_state_dest(&plane_state->base);
10847 
10848 	ret = intel_cursor_check_surface(plane_state);
10849 	if (ret)
10850 		return ret;
10851 
10852 	if (!plane_state->base.visible)
10853 		return 0;
10854 
10855 	ret = intel_plane_check_src_coordinates(plane_state);
10856 	if (ret)
10857 		return ret;
10858 
10859 	return 0;
10860 }
10861 
10862 static unsigned int
10863 i845_cursor_max_stride(struct intel_plane *plane,
10864 		       u32 pixel_format, u64 modifier,
10865 		       unsigned int rotation)
10866 {
10867 	return 2048;
10868 }
10869 
10870 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10871 {
10872 	u32 cntl = 0;
10873 
10874 	if (crtc_state->gamma_enable)
10875 		cntl |= CURSOR_GAMMA_ENABLE;
10876 
10877 	return cntl;
10878 }
10879 
10880 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10881 			   const struct intel_plane_state *plane_state)
10882 {
10883 	return CURSOR_ENABLE |
10884 		CURSOR_FORMAT_ARGB |
10885 		CURSOR_STRIDE(plane_state->color_plane[0].stride);
10886 }
10887 
10888 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10889 {
10890 	int width = drm_rect_width(&plane_state->base.dst);
10891 
10892 	/*
	 * 845g/865g are only limited by the width of their cursors;
	 * the height is arbitrary up to the precision of the register.
10895 	 */
10896 	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10897 }
10898 
10899 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10900 			     struct intel_plane_state *plane_state)
10901 {
10902 	const struct drm_framebuffer *fb = plane_state->base.fb;
10903 	int ret;
10904 
10905 	ret = intel_check_cursor(crtc_state, plane_state);
10906 	if (ret)
10907 		return ret;
10908 
10909 	/* if we want to turn off the cursor ignore width and height */
10910 	if (!fb)
10911 		return 0;
10912 
10913 	/* Check for which cursor types we support */
10914 	if (!i845_cursor_size_ok(plane_state)) {
10915 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10916 			  drm_rect_width(&plane_state->base.dst),
10917 			  drm_rect_height(&plane_state->base.dst));
10918 		return -EINVAL;
10919 	}
10920 
10921 	WARN_ON(plane_state->base.visible &&
10922 		plane_state->color_plane[0].stride != fb->pitches[0]);
10923 
10924 	switch (fb->pitches[0]) {
10925 	case 256:
10926 	case 512:
10927 	case 1024:
10928 	case 2048:
10929 		break;
10930 	default:
10931 		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10932 			      fb->pitches[0]);
10933 		return -EINVAL;
10934 	}
10935 
10936 	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10937 
10938 	return 0;
10939 }
10940 
10941 static void i845_update_cursor(struct intel_plane *plane,
10942 			       const struct intel_crtc_state *crtc_state,
10943 			       const struct intel_plane_state *plane_state)
10944 {
10945 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10946 	u32 cntl = 0, base = 0, pos = 0, size = 0;
10947 	unsigned long irqflags;
10948 
10949 	if (plane_state && plane_state->base.visible) {
10950 		unsigned int width = drm_rect_width(&plane_state->base.dst);
10951 		unsigned int height = drm_rect_height(&plane_state->base.dst);
10952 
10953 		cntl = plane_state->ctl |
10954 			i845_cursor_ctl_crtc(crtc_state);
10955 
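		/* CURSIZE takes the width in its low 12 bits, the height above them */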
10956 		size = (height << 12) | width;
10957 
10958 		base = intel_cursor_base(plane_state);
10959 		pos = intel_cursor_position(plane_state);
10960 	}
10961 
10962 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10963 
10964 	/* On these chipsets we can only modify the base/size/stride
10965 	 * whilst the cursor is disabled.
10966 	 */
10967 	if (plane->cursor.base != base ||
10968 	    plane->cursor.size != size ||
10969 	    plane->cursor.cntl != cntl) {
10970 		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10971 		I915_WRITE_FW(CURBASE(PIPE_A), base);
10972 		I915_WRITE_FW(CURSIZE, size);
10973 		I915_WRITE_FW(CURPOS(PIPE_A), pos);
10974 		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10975 
10976 		plane->cursor.base = base;
10977 		plane->cursor.size = size;
10978 		plane->cursor.cntl = cntl;
10979 	} else {
10980 		I915_WRITE_FW(CURPOS(PIPE_A), pos);
10981 	}
10982 
10983 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10984 }
10985 
10986 static void i845_disable_cursor(struct intel_plane *plane,
10987 				const struct intel_crtc_state *crtc_state)
10988 {
10989 	i845_update_cursor(plane, crtc_state, NULL);
10990 }
10991 
10992 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10993 				     enum pipe *pipe)
10994 {
10995 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10996 	enum intel_display_power_domain power_domain;
10997 	intel_wakeref_t wakeref;
10998 	bool ret;
10999 
11000 	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11001 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11002 	if (!wakeref)
11003 		return false;
11004 
11005 	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11006 
11007 	*pipe = PIPE_A;
11008 
11009 	intel_display_power_put(dev_priv, power_domain, wakeref);
11010 
11011 	return ret;
11012 }
11013 
11014 static unsigned int
11015 i9xx_cursor_max_stride(struct intel_plane *plane,
11016 		       u32 pixel_format, u64 modifier,
11017 		       unsigned int rotation)
11018 {
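	/* Cursors are always 32bpp ARGB, so the max stride is width * 4 bytes */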
11019 	return plane->base.dev->mode_config.cursor_width * 4;
11020 }
11021 
11022 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11023 {
11024 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11025 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11026 	u32 cntl = 0;
11027 
11028 	if (INTEL_GEN(dev_priv) >= 11)
11029 		return cntl;
11030 
11031 	if (crtc_state->gamma_enable)
11032 		cntl = MCURSOR_GAMMA_ENABLE;
11033 
11034 	if (crtc_state->csc_enable)
11035 		cntl |= MCURSOR_PIPE_CSC_ENABLE;
11036 
11037 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11038 		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11039 
11040 	return cntl;
11041 }
11042 
11043 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11044 			   const struct intel_plane_state *plane_state)
11045 {
11046 	struct drm_i915_private *dev_priv =
11047 		to_i915(plane_state->base.plane->dev);
11048 	u32 cntl = 0;
11049 
11050 	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11051 		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11052 
11053 	switch (drm_rect_width(&plane_state->base.dst)) {
11054 	case 64:
11055 		cntl |= MCURSOR_MODE_64_ARGB_AX;
11056 		break;
11057 	case 128:
11058 		cntl |= MCURSOR_MODE_128_ARGB_AX;
11059 		break;
11060 	case 256:
11061 		cntl |= MCURSOR_MODE_256_ARGB_AX;
11062 		break;
11063 	default:
11064 		MISSING_CASE(drm_rect_width(&plane_state->base.dst));
11065 		return 0;
11066 	}
11067 
11068 	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
11069 		cntl |= MCURSOR_ROTATE_180;
11070 
11071 	return cntl;
11072 }
11073 
11074 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11075 {
11076 	struct drm_i915_private *dev_priv =
11077 		to_i915(plane_state->base.plane->dev);
11078 	int width = drm_rect_width(&plane_state->base.dst);
11079 	int height = drm_rect_height(&plane_state->base.dst);
11080 
11081 	if (!intel_cursor_size_ok(plane_state))
11082 		return false;
11083 
11084 	/* Cursor width is limited to a few power-of-two sizes */
11085 	switch (width) {
11086 	case 256:
11087 	case 128:
11088 	case 64:
11089 		break;
11090 	default:
11091 		return false;
11092 	}
11093 
11094 	/*
11095 	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11096 	 * height from 8 lines up to the cursor width, when the
11097 	 * cursor is not rotated. Everything else requires square
11098 	 * cursors.
11099 	 */
11100 	if (HAS_CUR_FBC(dev_priv) &&
11101 	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
11102 		if (height < 8 || height > width)
11103 			return false;
11104 	} else {
11105 		if (height != width)
11106 			return false;
11107 	}
11108 
11109 	return true;
11110 }
11111 
11112 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
11113 			     struct intel_plane_state *plane_state)
11114 {
11115 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
11116 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11117 	const struct drm_framebuffer *fb = plane_state->base.fb;
11118 	enum pipe pipe = plane->pipe;
11119 	int ret;
11120 
11121 	ret = intel_check_cursor(crtc_state, plane_state);
11122 	if (ret)
11123 		return ret;
11124 
11125 	/* if we want to turn off the cursor ignore width and height */
11126 	if (!fb)
11127 		return 0;
11128 
11129 	/* Check for which cursor types we support */
11130 	if (!i9xx_cursor_size_ok(plane_state)) {
11131 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
11132 			  drm_rect_width(&plane_state->base.dst),
11133 			  drm_rect_height(&plane_state->base.dst));
11134 		return -EINVAL;
11135 	}
11136 
11137 	WARN_ON(plane_state->base.visible &&
11138 		plane_state->color_plane[0].stride != fb->pitches[0]);
11139 
11140 	if (fb->pitches[0] !=
11141 	    drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) {
11142 		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
11143 			      fb->pitches[0],
11144 			      drm_rect_width(&plane_state->base.dst));
11145 		return -EINVAL;
11146 	}
11147 
11148 	/*
11149 	 * There's something wrong with the cursor on CHV pipe C.
11150 	 * If it straddles the left edge of the screen then
11151 	 * moving it away from the edge or disabling it often
11152 	 * results in a pipe underrun, and often that can lead to
11153 	 * dead pipe (constant underrun reported, and it scans
11154 	 * out just a solid color). To recover from that, the
11155 	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
11157 	 */
11158 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
11159 	    plane_state->base.visible && plane_state->base.dst.x1 < 0) {
11160 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
11161 		return -EINVAL;
11162 	}
11163 
11164 	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
11165 
11166 	return 0;
11167 }
11168 
11169 static void i9xx_update_cursor(struct intel_plane *plane,
11170 			       const struct intel_crtc_state *crtc_state,
11171 			       const struct intel_plane_state *plane_state)
11172 {
11173 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11174 	enum pipe pipe = plane->pipe;
11175 	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
11176 	unsigned long irqflags;
11177 
11178 	if (plane_state && plane_state->base.visible) {
		unsigned int width = drm_rect_width(&plane_state->base.dst);
		unsigned int height = drm_rect_height(&plane_state->base.dst);
11181 
11182 		cntl = plane_state->ctl |
11183 			i9xx_cursor_ctl_crtc(crtc_state);
11184 
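		/* CUR_FBC_CTL takes the cursor height minus one */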
11185 		if (width != height)
11186 			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
11187 
11188 		base = intel_cursor_base(plane_state);
11189 		pos = intel_cursor_position(plane_state);
11190 	}
11191 
11192 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
11193 
11194 	/*
11195 	 * On some platforms writing CURCNTR first will also
11196 	 * cause CURPOS to be armed by the CURBASE write.
11197 	 * Without the CURCNTR write the CURPOS write would
11198 	 * arm itself. Thus we always update CURCNTR before
11199 	 * CURPOS.
11200 	 *
11201 	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor registers will cancel
11204 	 * an already armed cursor update. Thus leaving out
11205 	 * the CURBASE write after CURPOS could lead to a
11206 	 * cursor that doesn't appear to move, or even change
11207 	 * shape. Thus we always write CURBASE.
11208 	 *
	 * The other registers are armed by the CURBASE write
11210 	 * except when the plane is getting enabled at which time
11211 	 * the CURCNTR write arms the update.
11212 	 */
11213 
11214 	if (INTEL_GEN(dev_priv) >= 9)
11215 		skl_write_cursor_wm(plane, crtc_state);
11216 
11217 	if (plane->cursor.base != base ||
11218 	    plane->cursor.size != fbc_ctl ||
11219 	    plane->cursor.cntl != cntl) {
11220 		if (HAS_CUR_FBC(dev_priv))
11221 			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
11222 		I915_WRITE_FW(CURCNTR(pipe), cntl);
11223 		I915_WRITE_FW(CURPOS(pipe), pos);
11224 		I915_WRITE_FW(CURBASE(pipe), base);
11225 
11226 		plane->cursor.base = base;
11227 		plane->cursor.size = fbc_ctl;
11228 		plane->cursor.cntl = cntl;
11229 	} else {
11230 		I915_WRITE_FW(CURPOS(pipe), pos);
11231 		I915_WRITE_FW(CURBASE(pipe), base);
11232 	}
11233 
11234 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
11235 }
11236 
11237 static void i9xx_disable_cursor(struct intel_plane *plane,
11238 				const struct intel_crtc_state *crtc_state)
11239 {
11240 	i9xx_update_cursor(plane, crtc_state, NULL);
11241 }
11242 
11243 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11244 				     enum pipe *pipe)
11245 {
11246 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11247 	enum intel_display_power_domain power_domain;
11248 	intel_wakeref_t wakeref;
11249 	bool ret;
11250 	u32 val;
11251 
11252 	/*
11253 	 * Not 100% correct for planes that can move between pipes,
11254 	 * but that's only the case for gen2-3 which don't have any
11255 	 * display power wells.
11256 	 */
11257 	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11258 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11259 	if (!wakeref)
11260 		return false;
11261 
11262 	val = I915_READ(CURCNTR(plane->pipe));
11263 
11264 	ret = val & MCURSOR_MODE;
11265 
11266 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11267 		*pipe = plane->pipe;
11268 	else
11269 		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11270 			MCURSOR_PIPE_SELECT_SHIFT;
11271 
11272 	intel_display_power_put(dev_priv, power_domain, wakeref);
11273 
11274 	return ret;
11275 }
11276 
11277 /* VESA 640x480x72Hz mode to set on the pipe */
11278 static const struct drm_display_mode load_detect_mode = {
11279 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
11280 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
11281 };
11282 
11283 struct drm_framebuffer *
11284 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11285 			 struct drm_mode_fb_cmd2 *mode_cmd)
11286 {
11287 	struct intel_framebuffer *intel_fb;
11288 	int ret;
11289 
11290 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11291 	if (!intel_fb)
11292 		return ERR_PTR(-ENOMEM);
11293 
11294 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11295 	if (ret)
11296 		goto err;
11297 
11298 	return &intel_fb->base;
11299 
11300 err:
11301 	kfree(intel_fb);
11302 	return ERR_PTR(ret);
11303 }
11304 
11305 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11306 					struct drm_crtc *crtc)
11307 {
11308 	struct drm_plane *plane;
11309 	struct drm_plane_state *plane_state;
11310 	int ret, i;
11311 
11312 	ret = drm_atomic_add_affected_planes(state, crtc);
11313 	if (ret)
11314 		return ret;
11315 
11316 	for_each_new_plane_in_state(state, plane, plane_state, i) {
11317 		if (plane_state->crtc != crtc)
11318 			continue;
11319 
11320 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11321 		if (ret)
11322 			return ret;
11323 
11324 		drm_atomic_set_fb_for_plane(plane_state, NULL);
11325 	}
11326 
11327 	return 0;
11328 }
11329 
11330 int intel_get_load_detect_pipe(struct drm_connector *connector,
11331 			       struct intel_load_detect_pipe *old,
11332 			       struct drm_modeset_acquire_ctx *ctx)
11333 {
11334 	struct intel_crtc *intel_crtc;
11335 	struct intel_encoder *intel_encoder =
11336 		intel_attached_encoder(connector);
11337 	struct drm_crtc *possible_crtc;
11338 	struct drm_encoder *encoder = &intel_encoder->base;
11339 	struct drm_crtc *crtc = NULL;
11340 	struct drm_device *dev = encoder->dev;
11341 	struct drm_i915_private *dev_priv = to_i915(dev);
11342 	struct drm_mode_config *config = &dev->mode_config;
11343 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
11344 	struct drm_connector_state *connector_state;
11345 	struct intel_crtc_state *crtc_state;
11346 	int ret, i = -1;
11347 
11348 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11349 		      connector->base.id, connector->name,
11350 		      encoder->base.id, encoder->name);
11351 
11352 	old->restore_state = NULL;
11353 
11354 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
11355 
11356 	/*
11357 	 * Algorithm gets a little messy:
11358 	 *
11359 	 *   - if the connector already has an assigned crtc, use it (but make
11360 	 *     sure it's on first)
11361 	 *
11362 	 *   - try to find the first unused crtc that can drive this connector,
11363 	 *     and use that if we find one
11364 	 */
11365 
11366 	/* See if we already have a CRTC for this connector */
11367 	if (connector->state->crtc) {
11368 		crtc = connector->state->crtc;
11369 
11370 		ret = drm_modeset_lock(&crtc->mutex, ctx);
11371 		if (ret)
11372 			goto fail;
11373 
11374 		/* Make sure the crtc and connector are running */
11375 		goto found;
11376 	}
11377 
11378 	/* Find an unused one (if possible) */
11379 	for_each_crtc(dev, possible_crtc) {
11380 		i++;
11381 		if (!(encoder->possible_crtcs & (1 << i)))
11382 			continue;
11383 
11384 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
11385 		if (ret)
11386 			goto fail;
11387 
11388 		if (possible_crtc->state->enable) {
11389 			drm_modeset_unlock(&possible_crtc->mutex);
11390 			continue;
11391 		}
11392 
11393 		crtc = possible_crtc;
11394 		break;
11395 	}
11396 
11397 	/*
11398 	 * If we didn't find an unused CRTC, don't use any.
11399 	 */
11400 	if (!crtc) {
11401 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
11402 		ret = -ENODEV;
11403 		goto fail;
11404 	}
11405 
11406 found:
11407 	intel_crtc = to_intel_crtc(crtc);
11408 
11409 	state = drm_atomic_state_alloc(dev);
11410 	restore_state = drm_atomic_state_alloc(dev);
11411 	if (!state || !restore_state) {
11412 		ret = -ENOMEM;
11413 		goto fail;
11414 	}
11415 
11416 	state->acquire_ctx = ctx;
11417 	restore_state->acquire_ctx = ctx;
11418 
11419 	connector_state = drm_atomic_get_connector_state(state, connector);
11420 	if (IS_ERR(connector_state)) {
11421 		ret = PTR_ERR(connector_state);
11422 		goto fail;
11423 	}
11424 
11425 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
11426 	if (ret)
11427 		goto fail;
11428 
11429 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
11430 	if (IS_ERR(crtc_state)) {
11431 		ret = PTR_ERR(crtc_state);
11432 		goto fail;
11433 	}
11434 
	crtc_state->base.active = true;
	crtc_state->base.enable = true;
11436 
11437 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base,
11438 					   &load_detect_mode);
11439 	if (ret)
11440 		goto fail;
11441 
11442 	ret = intel_modeset_disable_planes(state, crtc);
11443 	if (ret)
11444 		goto fail;
11445 
11446 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
11447 	if (!ret)
11448 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
11449 	if (!ret)
11450 		ret = drm_atomic_add_affected_planes(restore_state, crtc);
11451 	if (ret) {
11452 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
11453 		goto fail;
11454 	}
11455 
11456 	ret = drm_atomic_commit(state);
11457 	if (ret) {
11458 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
11459 		goto fail;
11460 	}
11461 
11462 	old->restore_state = restore_state;
11463 	drm_atomic_state_put(state);
11464 
11465 	/* let the connector get through one full cycle before testing */
11466 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
11467 	return true;
11468 
11469 fail:
11470 	if (state) {
11471 		drm_atomic_state_put(state);
11472 		state = NULL;
11473 	}
11474 	if (restore_state) {
11475 		drm_atomic_state_put(restore_state);
11476 		restore_state = NULL;
11477 	}
11478 
11479 	if (ret == -EDEADLK)
11480 		return ret;
11481 
11482 	return false;
11483 }
11484 
11485 void intel_release_load_detect_pipe(struct drm_connector *connector,
11486 				    struct intel_load_detect_pipe *old,
11487 				    struct drm_modeset_acquire_ctx *ctx)
11488 {
11489 	struct intel_encoder *intel_encoder =
11490 		intel_attached_encoder(connector);
11491 	struct drm_encoder *encoder = &intel_encoder->base;
11492 	struct drm_atomic_state *state = old->restore_state;
11493 	int ret;
11494 
11495 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11496 		      connector->base.id, connector->name,
11497 		      encoder->base.id, encoder->name);
11498 
11499 	if (!state)
11500 		return;
11501 
11502 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11503 	if (ret)
11504 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11505 	drm_atomic_state_put(state);
11506 }
11507 
11508 static int i9xx_pll_refclk(struct drm_device *dev,
11509 			   const struct intel_crtc_state *pipe_config)
11510 {
11511 	struct drm_i915_private *dev_priv = to_i915(dev);
11512 	u32 dpll = pipe_config->dpll_hw_state.dpll;
11513 
11514 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11515 		return dev_priv->vbt.lvds_ssc_freq;
11516 	else if (HAS_PCH_SPLIT(dev_priv))
11517 		return 120000;
11518 	else if (!IS_GEN(dev_priv, 2))
11519 		return 96000;
11520 	else
11521 		return 48000;
11522 }
11523 
11524 /* Returns the clock of the currently programmed mode of the given pipe. */
11525 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11526 				struct intel_crtc_state *pipe_config)
11527 {
11528 	struct drm_device *dev = crtc->base.dev;
11529 	struct drm_i915_private *dev_priv = to_i915(dev);
11530 	enum pipe pipe = crtc->pipe;
11531 	u32 dpll = pipe_config->dpll_hw_state.dpll;
11532 	u32 fp;
11533 	struct dpll clock;
11534 	int port_clock;
11535 	int refclk = i9xx_pll_refclk(dev, pipe_config);
11536 
11537 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11538 		fp = pipe_config->dpll_hw_state.fp0;
11539 	else
11540 		fp = pipe_config->dpll_hw_state.fp1;
11541 
11542 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
11543 	if (IS_PINEVIEW(dev_priv)) {
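		/* Pineview stores N as a one-hot value (1 << n), hence the ffs() - 1 */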
11544 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11545 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11546 	} else {
11547 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11548 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
11549 	}
11550 
11551 	if (!IS_GEN(dev_priv, 2)) {
11552 		if (IS_PINEVIEW(dev_priv))
11553 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11554 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11555 		else
11556 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11557 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
11558 
11559 		switch (dpll & DPLL_MODE_MASK) {
11560 		case DPLLB_MODE_DAC_SERIAL:
11561 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11562 				5 : 10;
11563 			break;
11564 		case DPLLB_MODE_LVDS:
11565 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11566 				7 : 14;
11567 			break;
11568 		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed mode\n",
				      (int)(dpll & DPLL_MODE_MASK));
11571 			return;
11572 		}
11573 
11574 		if (IS_PINEVIEW(dev_priv))
11575 			port_clock = pnv_calc_dpll_params(refclk, &clock);
11576 		else
11577 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
11578 	} else {
11579 		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == PIPE_B) && (lvds & LVDS_PORT_EN);
11581 
11582 		if (is_lvds) {
11583 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11584 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
11585 
11586 			if (lvds & LVDS_CLKB_POWER_UP)
11587 				clock.p2 = 7;
11588 			else
11589 				clock.p2 = 14;
11590 		} else {
11591 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
11592 				clock.p1 = 2;
11593 			else {
11594 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11595 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11596 			}
11597 			if (dpll & PLL_P2_DIVIDE_BY_4)
11598 				clock.p2 = 4;
11599 			else
11600 				clock.p2 = 2;
11601 		}
11602 
11603 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
11604 	}
11605 
11606 	/*
11607 	 * This value includes pixel_multiplier. We will use
11608 	 * port_clock to compute adjusted_mode.crtc_clock in the
11609 	 * encoder's get_config() function.
11610 	 */
11611 	pipe_config->port_clock = port_clock;
11612 }
11613 
11614 int intel_dotclock_calculate(int link_freq,
11615 			     const struct intel_link_m_n *m_n)
11616 {
11617 	/*
11618 	 * The calculation for the data clock is:
11619 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the dotclock computed from the link M/N is simpler:
	 * dotclock = (link_m * link_clock) / link_n
11625 	 */
11626 
11627 	if (!m_n->link_n)
11628 		return 0;
11629 
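	/* Do the multiply at 64 bits to avoid overflowing a 32-bit intermediate */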
11630 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11631 }
11632 
11633 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11634 				   struct intel_crtc_state *pipe_config)
11635 {
11636 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11637 
11638 	/* read out port_clock from the DPLL */
11639 	i9xx_crtc_clock_get(crtc, pipe_config);
11640 
11641 	/*
11642 	 * In case there is an active pipe without active ports,
11643 	 * we may need some idea for the dotclock anyway.
11644 	 * Calculate one based on the FDI configuration.
11645 	 */
11646 	pipe_config->base.adjusted_mode.crtc_clock =
11647 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11648 					 &pipe_config->fdi_m_n);
11649 }
11650 
11651 /* Returns the currently programmed mode of the given encoder. */
11652 struct drm_display_mode *
11653 intel_encoder_current_mode(struct intel_encoder *encoder)
11654 {
11655 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11656 	struct intel_crtc_state *crtc_state;
11657 	struct drm_display_mode *mode;
11658 	struct intel_crtc *crtc;
11659 	enum pipe pipe;
11660 
11661 	if (!encoder->get_hw_state(encoder, &pipe))
11662 		return NULL;
11663 
11664 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11665 
11666 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11667 	if (!mode)
11668 		return NULL;
11669 
11670 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11671 	if (!crtc_state) {
11672 		kfree(mode);
11673 		return NULL;
11674 	}
11675 
11676 	crtc_state->base.crtc = &crtc->base;
11677 
11678 	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11679 		kfree(crtc_state);
11680 		kfree(mode);
11681 		return NULL;
11682 	}
11683 
11684 	encoder->get_config(encoder, crtc_state);
11685 
11686 	intel_mode_from_pipe_config(mode, crtc_state);
11687 
11688 	kfree(crtc_state);
11689 
11690 	return mode;
11691 }
11692 
11693 static void intel_crtc_destroy(struct drm_crtc *crtc)
11694 {
11695 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11696 
11697 	drm_crtc_cleanup(crtc);
11698 	kfree(intel_crtc);
11699 }
11700 
11701 /**
11702  * intel_wm_need_update - Check whether watermarks need updating
11703  * @cur: current plane state
11704  * @new: new plane state
11705  *
11706  * Check current plane state versus the new one to determine whether
11707  * watermarks need to be recalculated.
11708  *
 * Returns true if the watermarks need to be recalculated, false otherwise.
11710  */
11711 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11712 				 struct intel_plane_state *new)
11713 {
11714 	/* Update watermarks on tiling or size changes. */
11715 	if (new->base.visible != cur->base.visible)
11716 		return true;
11717 
11718 	if (!cur->base.fb || !new->base.fb)
11719 		return false;
11720 
11721 	if (cur->base.fb->modifier != new->base.fb->modifier ||
11722 	    cur->base.rotation != new->base.rotation ||
11723 	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11724 	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11725 	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11726 	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11727 		return true;
11728 
11729 	return false;
11730 }
11731 
11732 static bool needs_scaling(const struct intel_plane_state *state)
11733 {
11734 	int src_w = drm_rect_width(&state->base.src) >> 16;
11735 	int src_h = drm_rect_height(&state->base.src) >> 16;
11736 	int dst_w = drm_rect_width(&state->base.dst);
11737 	int dst_h = drm_rect_height(&state->base.dst);
11738 
11739 	return (src_w != dst_w || src_h != dst_h);
11740 }
11741 
11742 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11743 				    struct intel_crtc_state *crtc_state,
11744 				    const struct intel_plane_state *old_plane_state,
11745 				    struct intel_plane_state *plane_state)
11746 {
11747 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11748 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
11749 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11750 	bool mode_changed = needs_modeset(crtc_state);
11751 	bool was_crtc_enabled = old_crtc_state->base.active;
11752 	bool is_crtc_enabled = crtc_state->base.active;
11753 	bool turn_off, turn_on, visible, was_visible;
11754 	int ret;
11755 
11756 	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11757 		ret = skl_update_scaler_plane(crtc_state, plane_state);
11758 		if (ret)
11759 			return ret;
11760 	}
11761 
11762 	was_visible = old_plane_state->base.visible;
11763 	visible = plane_state->base.visible;
11764 
11765 	if (!was_crtc_enabled && WARN_ON(was_visible))
11766 		was_visible = false;
11767 
11768 	/*
11769 	 * Visibility is calculated as if the crtc was on, but
11770 	 * after scaler setup everything depends on it being off
11771 	 * when the crtc isn't active.
11772 	 *
11773 	 * FIXME this is wrong for watermarks. Watermarks should also
11774 	 * be computed as if the pipe would be active. Perhaps move
11775 	 * per-plane wm computation to the .check_plane() hook, and
11776 	 * only combine the results from all planes in the current place?
11777 	 */
11778 	if (!is_crtc_enabled) {
11779 		plane_state->base.visible = visible = false;
11780 		crtc_state->active_planes &= ~BIT(plane->id);
11781 		crtc_state->data_rate[plane->id] = 0;
11782 		crtc_state->min_cdclk[plane->id] = 0;
11783 	}
11784 
11785 	if (!was_visible && !visible)
11786 		return 0;
11787 
11788 	turn_off = was_visible && (!visible || mode_changed);
11789 	turn_on = visible && (!was_visible || mode_changed);
11790 
11791 	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11792 			 crtc->base.base.id, crtc->base.name,
11793 			 plane->base.base.id, plane->base.name,
11794 			 was_visible, visible,
11795 			 turn_off, turn_on, mode_changed);
11796 
11797 	if (turn_on) {
11798 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11799 			crtc_state->update_wm_pre = true;
11800 
11801 		/* must disable cxsr around plane enable/disable */
11802 		if (plane->id != PLANE_CURSOR)
11803 			crtc_state->disable_cxsr = true;
11804 	} else if (turn_off) {
11805 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11806 			crtc_state->update_wm_post = true;
11807 
11808 		/* must disable cxsr around plane enable/disable */
11809 		if (plane->id != PLANE_CURSOR)
11810 			crtc_state->disable_cxsr = true;
11811 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
11812 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11813 			/* FIXME bollocks */
11814 			crtc_state->update_wm_pre = true;
11815 			crtc_state->update_wm_post = true;
11816 		}
11817 	}
11818 
11819 	if (visible || was_visible)
11820 		crtc_state->fb_bits |= plane->frontbuffer_bit;
11821 
11822 	/*
11823 	 * ILK/SNB DVSACNTR/Sprite Enable
11824 	 * IVB SPR_CTL/Sprite Enable
11825 	 * "When in Self Refresh Big FIFO mode, a write to enable the
11826 	 *  plane will be internally buffered and delayed while Big FIFO
11827 	 *  mode is exiting."
11828 	 *
11829 	 * Which means that enabling the sprite can take an extra frame
11830 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
11831 	 * down to LP0 and wait for vblank in order to make sure the
11832 	 * sprite gets enabled on the next vblank after the register write.
11833 	 * Doing otherwise would risk enabling the sprite one frame after
11834 	 * we've already signalled flip completion. We can resume LP1+
11835 	 * once the sprite has been enabled.
11836 	 *
11838 	 * WaCxSRDisabledForSpriteScaling:ivb
11839 	 * IVB SPR_SCALE/Scaling Enable
11840 	 * "Low Power watermarks must be disabled for at least one
11841 	 *  frame before enabling sprite scaling, and kept disabled
11842 	 *  until sprite scaling is disabled."
11843 	 *
11844 	 * ILK/SNB DVSASCALE/Scaling Enable
11845 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
11846 	 *  masked off while Big FIFO mode is exiting."
11847 	 *
11848 	 * Despite the w/a only being listed for IVB we assume that
11849 	 * the ILK/SNB note has similar ramifications, hence we apply
11850 	 * the w/a on all three platforms.
11851 	 *
11852 	 * Experimental results suggest this is also needed for the
11853 	 * primary plane, not only the sprite plane.
11854 	 */
11855 	if (plane->id != PLANE_CURSOR &&
11856 	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
11857 	     IS_IVYBRIDGE(dev_priv)) &&
11858 	    (turn_on || (!needs_scaling(old_plane_state) &&
11859 			 needs_scaling(plane_state))))
11860 		crtc_state->disable_lp_wm = true;
11861 
11862 	return 0;
11863 }
11864 
11865 static bool encoders_cloneable(const struct intel_encoder *a,
11866 			       const struct intel_encoder *b)
11867 {
11868 	/* masks could be asymmetric, so check both ways */
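	/*
	 * e.g. (illustrative) encoder A may list B's type in its
	 * cloneable mask while B omits A's type; such a pair must not
	 * be cloned, hence both directions are checked below.
	 */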
11869 	return a == b || (a->cloneable & (1 << b->type) &&
11870 			  b->cloneable & (1 << a->type));
11871 }
11872 
11873 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11874 					 struct intel_crtc *crtc,
11875 					 struct intel_encoder *encoder)
11876 {
11877 	struct intel_encoder *source_encoder;
11878 	struct drm_connector *connector;
11879 	struct drm_connector_state *connector_state;
11880 	int i;
11881 
11882 	for_each_new_connector_in_state(state, connector, connector_state, i) {
11883 		if (connector_state->crtc != &crtc->base)
11884 			continue;
11885 
11886 		source_encoder =
11887 			to_intel_encoder(connector_state->best_encoder);
11888 		if (!encoders_cloneable(encoder, source_encoder))
11889 			return false;
11890 	}
11891 
11892 	return true;
11893 }
11894 
11895 static int icl_add_linked_planes(struct intel_atomic_state *state)
11896 {
11897 	struct intel_plane *plane, *linked;
11898 	struct intel_plane_state *plane_state, *linked_plane_state;
11899 	int i;
11900 
11901 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11902 		linked = plane_state->planar_linked_plane;
11903 
11904 		if (!linked)
11905 			continue;
11906 
11907 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
11908 		if (IS_ERR(linked_plane_state))
11909 			return PTR_ERR(linked_plane_state);
11910 
11911 		WARN_ON(linked_plane_state->planar_linked_plane != plane);
11912 		WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
11913 	}
11914 
11915 	return 0;
11916 }
11917 
11918 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11919 {
11920 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11921 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11922 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11923 	struct intel_plane *plane, *linked;
11924 	struct intel_plane_state *plane_state;
11925 	int i;
11926 
11927 	if (INTEL_GEN(dev_priv) < 11)
11928 		return 0;
11929 
11930 	/*
11931 	 * Destroy all old plane links and make the slave plane invisible
11932 	 * in the crtc_state->active_planes mask.
11933 	 */
11934 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11935 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
11936 			continue;
11937 
11938 		plane_state->planar_linked_plane = NULL;
11939 		if (plane_state->planar_slave && !plane_state->base.visible) {
11940 			crtc_state->active_planes &= ~BIT(plane->id);
11941 			crtc_state->update_planes |= BIT(plane->id);
11942 		}
11943 
11944 		plane_state->planar_slave = false;
11945 	}
11946 
11947 	if (!crtc_state->nv12_planes)
11948 		return 0;
11949 
11950 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11951 		struct intel_plane_state *linked_state = NULL;
11952 
11953 		if (plane->pipe != crtc->pipe ||
11954 		    !(crtc_state->nv12_planes & BIT(plane->id)))
11955 			continue;
11956 
11957 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11958 			if (!icl_is_nv12_y_plane(linked->id))
11959 				continue;
11960 
11961 			if (crtc_state->active_planes & BIT(linked->id))
11962 				continue;
11963 
11964 			linked_state = intel_atomic_get_plane_state(state, linked);
11965 			if (IS_ERR(linked_state))
11966 				return PTR_ERR(linked_state);
11967 
11968 			break;
11969 		}
11970 
11971 		if (!linked_state) {
11972 			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11973 				      hweight8(crtc_state->nv12_planes));
11974 
11975 			return -EINVAL;
11976 		}
11977 
11978 		plane_state->planar_linked_plane = linked;
11979 
11980 		linked_state->planar_slave = true;
11981 		linked_state->planar_linked_plane = plane;
11982 		crtc_state->active_planes |= BIT(linked->id);
11983 		crtc_state->update_planes |= BIT(linked->id);
11984 		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11985 	}
11986 
11987 	return 0;
11988 }
11989 
11990 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11991 {
11992 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11993 	struct intel_atomic_state *state =
11994 		to_intel_atomic_state(new_crtc_state->base.state);
11995 	const struct intel_crtc_state *old_crtc_state =
11996 		intel_atomic_get_old_crtc_state(state, crtc);
11997 
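	/*
	 * Note: !x != !y normalizes both bitmasks to booleans, so this
	 * fires only when the set of C8 planes goes from empty to
	 * non-empty or vice versa, e.g. (illustrative) 0x0 -> 0x1
	 * triggers, 0x1 -> 0x3 does not.
	 */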
11998 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11999 }
12000 
12001 static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
12002 {
12003 	struct drm_crtc *crtc = crtc_state->base.crtc;
12004 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
12005 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
12006 	struct drm_connector *master_connector, *connector;
12007 	struct drm_connector_state *connector_state;
12008 	struct drm_connector_list_iter conn_iter;
12009 	struct drm_crtc *master_crtc = NULL;
12010 	struct drm_crtc_state *master_crtc_state;
12011 	struct intel_crtc_state *master_pipe_config;
12012 	int i, tile_group_id;
12013 
12014 	if (INTEL_GEN(dev_priv) < 11)
12015 		return 0;
12016 
12017 	/*
12018 	 * With tiled displays there can be one or more slaves, but there is
12019 	 * only one master. Let's make the CRTC used by the connector
12020 	 * corresponding to the last horizontal and last vertical tile the
12021 	 * master/genlock CRTC. All other CRTCs for tiles of the same tile
12022 	 * group are slave CRTCs and hold a pointer to their genlock CRTC.
12023 	 */
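	/*
	 * e.g. (illustrative) on a 2x2 tiled panel the connector at tile
	 * location (1, 1) keeps its CRTC as the master, while the CRTCs
	 * of the other three tiles record that CRTC's transcoder in
	 * master_transcoder and set their own transcoder bit in the
	 * master's sync_mode_slaves_mask.
	 */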
12024 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
12025 		if (connector_state->crtc != crtc)
12026 			continue;
12027 		if (!connector->has_tile)
12028 			continue;
12029 		if (crtc_state->base.mode.hdisplay != connector->tile_h_size ||
12030 		    crtc_state->base.mode.vdisplay != connector->tile_v_size)
12031 			return 0;
12032 		if (connector->tile_h_loc == connector->num_h_tile - 1 &&
12033 		    connector->tile_v_loc == connector->num_v_tile - 1)
12034 			continue;
12035 		crtc_state->sync_mode_slaves_mask = 0;
12036 		tile_group_id = connector->tile_group->id;
12037 		drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
12038 		drm_for_each_connector_iter(master_connector, &conn_iter) {
12039 			struct drm_connector_state *master_conn_state = NULL;
12040 
12041 			if (!master_connector->has_tile)
12042 				continue;
12043 			if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
12044 			    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
12045 				continue;
12046 			if (master_connector->tile_group->id != tile_group_id)
12047 				continue;
12048 
12049 			master_conn_state = drm_atomic_get_connector_state(&state->base,
12050 									   master_connector);
12051 			if (IS_ERR(master_conn_state)) {
12052 				drm_connector_list_iter_end(&conn_iter);
12053 				return PTR_ERR(master_conn_state);
12054 			}
12055 			if (master_conn_state->crtc) {
12056 				master_crtc = master_conn_state->crtc;
12057 				break;
12058 			}
12059 		}
12060 		drm_connector_list_iter_end(&conn_iter);
12061 
12062 		if (!master_crtc) {
12063 			DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
12064 				      connector_state->crtc->base.id);
12065 			return -EINVAL;
12066 		}
12067 
12068 		master_crtc_state = drm_atomic_get_crtc_state(&state->base,
12069 							      master_crtc);
12070 		if (IS_ERR(master_crtc_state))
12071 			return PTR_ERR(master_crtc_state);
12072 
12073 		master_pipe_config = to_intel_crtc_state(master_crtc_state);
12074 		crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
12075 		master_pipe_config->sync_mode_slaves_mask |=
12076 			BIT(crtc_state->cpu_transcoder);
12077 		DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
12078 			      transcoder_name(crtc_state->master_transcoder),
12079 			      crtc_state->base.crtc->base.id,
12080 			      master_pipe_config->sync_mode_slaves_mask);
12081 	}
12082 
12083 	return 0;
12084 }
12085 
12086 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
12087 				   struct intel_crtc *crtc)
12088 {
12089 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12090 	struct intel_crtc_state *crtc_state =
12091 		intel_atomic_get_new_crtc_state(state, crtc);
12092 	bool mode_changed = needs_modeset(crtc_state);
12093 	int ret;
12094 
12095 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
12096 	    mode_changed && !crtc_state->base.active)
12097 		crtc_state->update_wm_post = true;
12098 
12099 	if (mode_changed && crtc_state->base.enable &&
12100 	    dev_priv->display.crtc_compute_clock &&
12101 	    !WARN_ON(crtc_state->shared_dpll)) {
12102 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
12103 		if (ret)
12104 			return ret;
12105 	}
12106 
12107 	/*
12108 	 * May need to update pipe gamma enable bits
12109 	 * when C8 planes are getting enabled/disabled.
12110 	 */
12111 	if (c8_planes_changed(crtc_state))
12112 		crtc_state->base.color_mgmt_changed = true;
12113 
12114 	if (mode_changed || crtc_state->update_pipe ||
12115 	    crtc_state->base.color_mgmt_changed) {
12116 		ret = intel_color_check(crtc_state);
12117 		if (ret)
12118 			return ret;
12119 	}
12120 
12121 	ret = 0;
12122 	if (dev_priv->display.compute_pipe_wm) {
12123 		ret = dev_priv->display.compute_pipe_wm(crtc_state);
12124 		if (ret) {
12125 			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
12126 			return ret;
12127 		}
12128 	}
12129 
12130 	if (dev_priv->display.compute_intermediate_wm) {
12131 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12132 			return 0;
12133 
12134 		/*
12135 		 * Calculate 'intermediate' watermarks that satisfy both the
12136 		 * old state and the new state.  We can program these
12137 		 * immediately.
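		 *
		 * e.g. (illustrative) on ILK-style platforms the intermediate
		 * watermark is the maximum of the old and new values, which
		 * is safe for both configurations during the transition.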
12138 		 */
12139 		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
12140 		if (ret) {
12141 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12142 			return ret;
12143 		}
12144 	}
12145 
12146 	if (INTEL_GEN(dev_priv) >= 9) {
12147 		if (mode_changed || crtc_state->update_pipe)
12148 			ret = skl_update_scaler_crtc(crtc_state);
12149 		if (!ret)
12150 			ret = intel_atomic_setup_scalers(dev_priv, crtc,
12151 							 crtc_state);
12152 	}
12153 
12154 	if (HAS_IPS(dev_priv))
12155 		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
12156 
12157 	return ret;
12158 }
12159 
12160 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12161 {
12162 	struct intel_connector *connector;
12163 	struct drm_connector_list_iter conn_iter;
12164 
12165 	drm_connector_list_iter_begin(dev, &conn_iter);
12166 	for_each_intel_connector_iter(connector, &conn_iter) {
12167 		if (connector->base.state->crtc)
12168 			drm_connector_put(&connector->base);
12169 
12170 		if (connector->base.encoder) {
12171 			connector->base.state->best_encoder =
12172 				connector->base.encoder;
12173 			connector->base.state->crtc =
12174 				connector->base.encoder->crtc;
12175 
12176 			drm_connector_get(&connector->base);
12177 		} else {
12178 			connector->base.state->best_encoder = NULL;
12179 			connector->base.state->crtc = NULL;
12180 		}
12181 	}
12182 	drm_connector_list_iter_end(&conn_iter);
12183 }
12184 
12185 static int
12186 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12187 		      struct intel_crtc_state *pipe_config)
12188 {
12189 	struct drm_connector *connector = conn_state->connector;
12190 	const struct drm_display_info *info = &connector->display_info;
12191 	int bpp;
12192 
12193 	switch (conn_state->max_bpc) {
12194 	case 6 ... 7:
12195 		bpp = 6 * 3;
12196 		break;
12197 	case 8 ... 9:
12198 		bpp = 8 * 3;
12199 		break;
12200 	case 10 ... 11:
12201 		bpp = 10 * 3;
12202 		break;
12203 	case 12:
12204 		bpp = 12 * 3;
12205 		break;
12206 	default:
12207 		return -EINVAL;
12208 	}
12209 
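	/*
	 * Clamp to the sink limit, e.g. (illustrative) a platform default
	 * of 36 bpp combined with max_bpc == 8 ends up at 8 bpc * 3
	 * channels = 24 bpp; a max_bpc below 6 was already rejected above.
	 */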
12210 	if (bpp < pipe_config->pipe_bpp) {
12211 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12212 			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12213 			      connector->base.id, connector->name,
12214 			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
12215 			      pipe_config->pipe_bpp);
12216 
12217 		pipe_config->pipe_bpp = bpp;
12218 	}
12219 
12220 	return 0;
12221 }
12222 
12223 static int
12224 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12225 			  struct intel_crtc_state *pipe_config)
12226 {
12227 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12228 	struct drm_atomic_state *state = pipe_config->base.state;
12229 	struct drm_connector *connector;
12230 	struct drm_connector_state *connector_state;
12231 	int bpp, i;
12232 
12233 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12234 	    IS_CHERRYVIEW(dev_priv)))
12235 		bpp = 10*3;
12236 	else if (INTEL_GEN(dev_priv) >= 5)
12237 		bpp = 12*3;
12238 	else
12239 		bpp = 8*3;
12240 
12241 	pipe_config->pipe_bpp = bpp;
12242 
12243 	/* Clamp display bpp to connector max bpp */
12244 	for_each_new_connector_in_state(state, connector, connector_state, i) {
12245 		int ret;
12246 
12247 		if (connector_state->crtc != &crtc->base)
12248 			continue;
12249 
12250 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12251 		if (ret)
12252 			return ret;
12253 	}
12254 
12255 	return 0;
12256 }
12257 
12258 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12259 {
12260 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12261 		      "type: 0x%x flags: 0x%x\n",
12262 		      mode->crtc_clock,
12263 		      mode->crtc_hdisplay, mode->crtc_hsync_start,
12264 		      mode->crtc_hsync_end, mode->crtc_htotal,
12265 		      mode->crtc_vdisplay, mode->crtc_vsync_start,
12266 		      mode->crtc_vsync_end, mode->crtc_vtotal,
12267 		      mode->type, mode->flags);
12268 }
12269 
12270 static inline void
12271 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
12272 		      const char *id, unsigned int lane_count,
12273 		      const struct intel_link_m_n *m_n)
12274 {
12275 	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12276 		      id, lane_count,
12277 		      m_n->gmch_m, m_n->gmch_n,
12278 		      m_n->link_m, m_n->link_n, m_n->tu);
12279 }
12280 
12281 static void
12282 intel_dump_infoframe(struct drm_i915_private *dev_priv,
12283 		     const union hdmi_infoframe *frame)
12284 {
12285 	if ((drm_debug & DRM_UT_KMS) == 0)
12286 		return;
12287 
12288 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
12289 }
12290 
12291 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
12292 
12293 static const char * const output_type_str[] = {
12294 	OUTPUT_TYPE(UNUSED),
12295 	OUTPUT_TYPE(ANALOG),
12296 	OUTPUT_TYPE(DVO),
12297 	OUTPUT_TYPE(SDVO),
12298 	OUTPUT_TYPE(LVDS),
12299 	OUTPUT_TYPE(TVOUT),
12300 	OUTPUT_TYPE(HDMI),
12301 	OUTPUT_TYPE(DP),
12302 	OUTPUT_TYPE(EDP),
12303 	OUTPUT_TYPE(DSI),
12304 	OUTPUT_TYPE(DDI),
12305 	OUTPUT_TYPE(DP_MST),
12306 };
12307 
12308 #undef OUTPUT_TYPE
12309 
12310 static void snprintf_output_types(char *buf, size_t len,
12311 				  unsigned int output_types)
12312 {
12313 	char *str = buf;
12314 	int i;
12315 
12316 	str[0] = '\0';
12317 
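	/*
	 * e.g. (illustrative) output_types with the HDMI and DP bits set
	 * renders as "HDMI,DP"; names are emitted in ascending bit order
	 * and the loop stops early once the buffer would overflow.
	 */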
12318 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12319 		int r;
12320 
12321 		if ((output_types & BIT(i)) == 0)
12322 			continue;
12323 
12324 		r = snprintf(str, len, "%s%s",
12325 			     str != buf ? "," : "", output_type_str[i]);
12326 		if (r >= len)
12327 			break;
12328 		str += r;
12329 		len -= r;
12330 
12331 		output_types &= ~BIT(i);
12332 	}
12333 
12334 	WARN_ON_ONCE(output_types != 0);
12335 }
12336 
12337 static const char * const output_format_str[] = {
12338 	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
12339 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
12340 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
12341 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
12342 };
12343 
12344 static const char *output_formats(enum intel_output_format format)
12345 {
12346 	if (format >= ARRAY_SIZE(output_format_str))
12347 		format = INTEL_OUTPUT_FORMAT_INVALID;
12348 	return output_format_str[format];
12349 }
12350 
12351 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
12352 {
12353 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
12354 	const struct drm_framebuffer *fb = plane_state->base.fb;
12355 	struct drm_format_name_buf format_name;
12356 
12357 	if (!fb) {
12358 		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
12359 			      plane->base.base.id, plane->base.name,
12360 			      yesno(plane_state->base.visible));
12361 		return;
12362 	}
12363 
12364 	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
12365 		      plane->base.base.id, plane->base.name,
12366 		      fb->base.id, fb->width, fb->height,
12367 		      drm_get_format_name(fb->format->format, &format_name),
12368 		      yesno(plane_state->base.visible));
12369 	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
12370 		      plane_state->base.rotation, plane_state->scaler_id);
12371 	if (plane_state->base.visible)
12372 		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
12373 			      DRM_RECT_FP_ARG(&plane_state->base.src),
12374 			      DRM_RECT_ARG(&plane_state->base.dst));
12375 }
12376 
12377 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
12378 				   struct intel_atomic_state *state,
12379 				   const char *context)
12380 {
12381 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
12382 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12383 	const struct intel_plane_state *plane_state;
12384 	struct intel_plane *plane;
12385 	char buf[64];
12386 	int i;
12387 
12388 	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
12389 		      crtc->base.base.id, crtc->base.name,
12390 		      yesno(pipe_config->base.enable), context);
12391 
12392 	if (!pipe_config->base.enable)
12393 		goto dump_planes;
12394 
12395 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
12396 	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
12397 		      yesno(pipe_config->base.active),
12398 		      buf, pipe_config->output_types,
12399 		      output_formats(pipe_config->output_format));
12400 
12401 	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
12402 		      transcoder_name(pipe_config->cpu_transcoder),
12403 		      pipe_config->pipe_bpp, pipe_config->dither);
12404 
12405 	if (pipe_config->has_pch_encoder)
12406 		intel_dump_m_n_config(pipe_config, "fdi",
12407 				      pipe_config->fdi_lanes,
12408 				      &pipe_config->fdi_m_n);
12409 
12410 	if (intel_crtc_has_dp_encoder(pipe_config)) {
12411 		intel_dump_m_n_config(pipe_config, "dp m_n",
12412 				pipe_config->lane_count, &pipe_config->dp_m_n);
12413 		if (pipe_config->has_drrs)
12414 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
12415 					      pipe_config->lane_count,
12416 					      &pipe_config->dp_m2_n2);
12417 	}
12418 
12419 	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
12420 		      pipe_config->has_audio, pipe_config->has_infoframe,
12421 		      pipe_config->infoframes.enable);
12422 
12423 	if (pipe_config->infoframes.enable &
12424 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
12425 		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
12426 	if (pipe_config->infoframes.enable &
12427 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
12428 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
12429 	if (pipe_config->infoframes.enable &
12430 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
12431 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
12432 	if (pipe_config->infoframes.enable &
12433 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
12434 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
12435 
12436 	DRM_DEBUG_KMS("requested mode:\n");
12437 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12438 	DRM_DEBUG_KMS("adjusted mode:\n");
12439 	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12440 	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12441 	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
12442 		      pipe_config->port_clock,
12443 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
12444 		      pipe_config->pixel_rate);
12445 
12446 	if (INTEL_GEN(dev_priv) >= 9)
12447 		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12448 			      crtc->num_scalers,
12449 			      pipe_config->scaler_state.scaler_users,
12450 			      pipe_config->scaler_state.scaler_id);
12451 
12452 	if (HAS_GMCH(dev_priv))
12453 		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12454 			      pipe_config->gmch_pfit.control,
12455 			      pipe_config->gmch_pfit.pgm_ratios,
12456 			      pipe_config->gmch_pfit.lvds_border_bits);
12457 	else
12458 		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
12459 			      pipe_config->pch_pfit.pos,
12460 			      pipe_config->pch_pfit.size,
12461 			      enableddisabled(pipe_config->pch_pfit.enabled),
12462 			      yesno(pipe_config->pch_pfit.force_thru));
12463 
12464 	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
12465 		      pipe_config->ips_enabled, pipe_config->double_wide);
12466 
12467 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
12468 
12469 	if (IS_CHERRYVIEW(dev_priv))
12470 		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
12471 			      pipe_config->cgm_mode, pipe_config->gamma_mode,
12472 			      pipe_config->gamma_enable, pipe_config->csc_enable);
12473 	else
12474 		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
12475 			      pipe_config->csc_mode, pipe_config->gamma_mode,
12476 			      pipe_config->gamma_enable, pipe_config->csc_enable);
12477 
12478 dump_planes:
12479 	if (!state)
12480 		return;
12481 
12482 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12483 		if (plane->pipe == crtc->pipe)
12484 			intel_dump_plane_state(plane_state);
12485 	}
12486 }
12487 
12488 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12489 {
12490 	struct drm_device *dev = state->base.dev;
12491 	struct drm_connector *connector;
12492 	struct drm_connector_list_iter conn_iter;
12493 	unsigned int used_ports = 0;
12494 	unsigned int used_mst_ports = 0;
12495 	bool ret = true;
12496 
12497 	/*
12498 	 * We're going to peek into connector->state,
12499 	 * hence connection_mutex must be held.
12500 	 */
12501 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
12502 
12503 	/*
12504 	 * Walk the connector list instead of the encoder
12505 	 * list to detect the problem on ddi platforms
12506 	 * where there's just one encoder per digital port.
12507 	 */
12508 	drm_connector_list_iter_begin(dev, &conn_iter);
12509 	drm_for_each_connector_iter(connector, &conn_iter) {
12510 		struct drm_connector_state *connector_state;
12511 		struct intel_encoder *encoder;
12512 
12513 		connector_state =
12514 			drm_atomic_get_new_connector_state(&state->base,
12515 							   connector);
12516 		if (!connector_state)
12517 			connector_state = connector->state;
12518 
12519 		if (!connector_state->best_encoder)
12520 			continue;
12521 
12522 		encoder = to_intel_encoder(connector_state->best_encoder);
12523 
12524 		WARN_ON(!connector_state->crtc);
12525 
12526 		switch (encoder->type) {
12527 			unsigned int port_mask;
12528 		case INTEL_OUTPUT_DDI:
12529 			if (WARN_ON(!HAS_DDI(to_i915(dev))))
12530 				break;
12531 			/* else, fall through */
12532 		case INTEL_OUTPUT_DP:
12533 		case INTEL_OUTPUT_HDMI:
12534 		case INTEL_OUTPUT_EDP:
12535 			port_mask = 1 << encoder->port;
12536 
12537 			/* the same port mustn't appear more than once */
12538 			if (used_ports & port_mask)
12539 				ret = false;
12540 
12541 			used_ports |= port_mask;
12542 			break;
12543 		case INTEL_OUTPUT_DP_MST:
12544 			used_mst_ports |=
12545 				1 << encoder->port;
12546 			break;
12547 		default:
12548 			break;
12549 		}
12550 	}
12551 	drm_connector_list_iter_end(&conn_iter);
12552 
12553 	/* can't mix MST and SST/HDMI on the same port */
12554 	if (used_ports & used_mst_ports)
12555 		return false;
12556 
12557 	return ret;
12558 }
12559 
12560 static int
12561 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12562 {
12563 	struct drm_i915_private *dev_priv =
12564 		to_i915(crtc_state->base.crtc->dev);
12565 	struct intel_crtc_state *saved_state;
12566 
12567 	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12568 	if (!saved_state)
12569 		return -ENOMEM;
12570 
12571 	/* FIXME: before the switch to atomic started, a new pipe_config was
12572 	 * kzalloc'd. Code that depends on any field being zero should be
12573 	 * fixed, so that the crtc_state can be safely duplicated. For now,
12574 	 * only fields that are known not to cause problems are preserved. */
12575 
12576 	saved_state->scaler_state = crtc_state->scaler_state;
12577 	saved_state->shared_dpll = crtc_state->shared_dpll;
12578 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12579 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
12580 	       sizeof(saved_state->icl_port_dplls));
12581 	saved_state->crc_enabled = crtc_state->crc_enabled;
12582 	if (IS_G4X(dev_priv) ||
12583 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12584 		saved_state->wm = crtc_state->wm;
12585 	/*
12586 	 * Save the slave bitmask which gets filled for master crtc state during
12587 	 * slave atomic check call.
12588 	 */
12589 	if (is_trans_port_sync_master(crtc_state))
12590 		saved_state->sync_mode_slaves_mask =
12591 			crtc_state->sync_mode_slaves_mask;
12592 
12593 	/* Keep base drm_crtc_state intact, only clear our extended struct */
12594 	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
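	/*
	 * base is the first member (asserted above), so &crtc_state->base + 1
	 * points just past the embedded drm_crtc_state and the copy below
	 * touches only the i915-specific tail of the structure.
	 */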
12595 	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
12596 	       sizeof(*crtc_state) - sizeof(crtc_state->base));
12597 
12598 	kfree(saved_state);
12599 	return 0;
12600 }
12601 
12602 static int
12603 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12604 {
12605 	struct drm_crtc *crtc = pipe_config->base.crtc;
12606 	struct drm_atomic_state *state = pipe_config->base.state;
12607 	struct intel_encoder *encoder;
12608 	struct drm_connector *connector;
12609 	struct drm_connector_state *connector_state;
12610 	int base_bpp, ret;
12611 	int i;
12612 	bool retry = true;
12613 
12614 	ret = clear_intel_crtc_state(pipe_config);
12615 	if (ret)
12616 		return ret;
12617 
12618 	pipe_config->cpu_transcoder =
12619 		(enum transcoder) to_intel_crtc(crtc)->pipe;
12620 
12621 	/*
12622 	 * Sanitize the sync polarity flags based on the requested ones. If
12623 	 * neither positive nor negative polarity is requested, treat this
12624 	 * as meaning negative polarity.
12625 	 */
12626 	if (!(pipe_config->base.adjusted_mode.flags &
12627 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12628 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12629 
12630 	if (!(pipe_config->base.adjusted_mode.flags &
12631 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12632 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12633 
12634 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12635 					pipe_config);
12636 	if (ret)
12637 		return ret;
12638 
12639 	base_bpp = pipe_config->pipe_bpp;
12640 
12641 	/*
12642 	 * Determine the real pipe dimensions. Note that stereo modes can
12643 	 * increase the actual pipe size due to the frame doubling and
12644 	 * insertion of additional space for blanks between the frames. This
12645 	 * is stored in the crtc timings. We use the requested mode to do this
12646 	 * computation to clearly distinguish it from the adjusted mode, which
12647 	 * can be changed by the connectors in the below retry loop.
12648 	 */
12649 	drm_mode_get_hv_timing(&pipe_config->base.mode,
12650 			       &pipe_config->pipe_src_w,
12651 			       &pipe_config->pipe_src_h);
12652 
12653 	for_each_new_connector_in_state(state, connector, connector_state, i) {
12654 		if (connector_state->crtc != crtc)
12655 			continue;
12656 
12657 		encoder = to_intel_encoder(connector_state->best_encoder);
12658 
12659 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12660 			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12661 			return -EINVAL;
12662 		}
12663 
12664 		/*
12665 		 * Determine output_types before calling the .compute_config()
12666 		 * hooks so that the hooks can use this information safely.
12667 		 */
12668 		if (encoder->compute_output_type)
12669 			pipe_config->output_types |=
12670 				BIT(encoder->compute_output_type(encoder, pipe_config,
12671 								 connector_state));
12672 		else
12673 			pipe_config->output_types |= BIT(encoder->type);
12674 	}
12675 
12676 encoder_retry:
12677 	/* Ensure the port clock defaults are reset when retrying. */
12678 	pipe_config->port_clock = 0;
12679 	pipe_config->pixel_multiplier = 1;
12680 
12681 	/* Fill in default crtc timings, allow encoders to overwrite them. */
12682 	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12683 			      CRTC_STEREO_DOUBLE);
12684 
12685 	/* Set the crtc_state defaults for trans_port_sync */
12686 	pipe_config->master_transcoder = INVALID_TRANSCODER;
12687 	ret = icl_add_sync_mode_crtcs(pipe_config);
12688 	if (ret) {
12689 		DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
12690 			      ret);
12691 		return ret;
12692 	}
12693 
12694 	/* Pass our mode to the connectors and the CRTC to give them a chance to
12695 	 * adjust it according to limitations or connector properties, and also
12696 	 * a chance to reject the mode entirely.
12697 	 */
12698 	for_each_new_connector_in_state(state, connector, connector_state, i) {
12699 		if (connector_state->crtc != crtc)
12700 			continue;
12701 
12702 		encoder = to_intel_encoder(connector_state->best_encoder);
12703 		ret = encoder->compute_config(encoder, pipe_config,
12704 					      connector_state);
12705 		if (ret < 0) {
12706 			if (ret != -EDEADLK)
12707 				DRM_DEBUG_KMS("Encoder config failure: %d\n",
12708 					      ret);
12709 			return ret;
12710 		}
12711 	}
12712 
12713 	/* Set default port clock if not overwritten by the encoder. Needs to be
12714 	 * done afterwards in case the encoder adjusts the mode. */
12715 	if (!pipe_config->port_clock)
12716 		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12717 			* pipe_config->pixel_multiplier;
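	/*
	 * e.g. (illustrative) a 40000 kHz adjusted_mode.crtc_clock with
	 * pixel_multiplier == 2 (as used for pixel repetition at low
	 * SDVO dotclocks) gives an 80000 kHz default port clock.
	 */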
12718 
12719 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12720 	if (ret == -EDEADLK)
12721 		return ret;
12722 	if (ret < 0) {
12723 		DRM_DEBUG_KMS("CRTC fixup failed\n");
12724 		return ret;
12725 	}
12726 
12727 	if (ret == RETRY) {
12728 		if (WARN(!retry, "loop in pipe configuration computation\n"))
12729 			return -EINVAL;
12730 
12731 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12732 		retry = false;
12733 		goto encoder_retry;
12734 	}
12735 
12736 	/* Dithering seems not to pass bits through correctly when it should, so
12737 	 * only enable it on 6bpc panels and when it's not a compliance
12738 	 * test requesting a 6bpc video pattern.
12739 	 */
12740 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12741 		!pipe_config->dither_force_disable;
12742 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12743 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12744 
12745 	return 0;
12746 }
12747 
12748 bool intel_fuzzy_clock_check(int clock1, int clock2)
12749 {
12750 	int diff;
12751 
12752 	if (clock1 == clock2)
12753 		return true;
12754 
12755 	if (!clock1 || !clock2)
12756 		return false;
12757 
12758 	diff = abs(clock1 - clock2);
12759 
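	/*
	 * Accept the clocks as equal when their absolute difference is
	 * below 5% of their sum (roughly 10% of either clock when they
	 * are close). Worked example with illustrative numbers:
	 * clock1 = 100000, clock2 = 107000 -> diff = 7000 and
	 * (7000 + 207000) * 100 / 207000 ~= 103 < 105, so they match;
	 * 100000 vs. 112000 gives ~105.7 and is rejected.
	 */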
12760 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12761 		return true;
12762 
12763 	return false;
12764 }
12765 
12766 static bool
12767 intel_compare_m_n(unsigned int m, unsigned int n,
12768 		  unsigned int m2, unsigned int n2,
12769 		  bool exact)
12770 {
12771 	if (m == m2 && n == n2)
12772 		return true;
12773 
12774 	if (exact || !m || !n || !m2 || !n2)
12775 		return false;
12776 
12777 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12778 
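	/*
	 * Scale the ratio with the smaller denominator up by powers of
	 * two until the denominators match, e.g. (illustrative)
	 * comparing m/n = 1/2 against m2/n2 = 2/4: 1/2 is doubled to
	 * 2/4 and the numerators are then compared with the fuzzy
	 * clock check.
	 */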
12779 	if (n > n2) {
12780 		while (n > n2) {
12781 			m2 <<= 1;
12782 			n2 <<= 1;
12783 		}
12784 	} else if (n < n2) {
12785 		while (n < n2) {
12786 			m <<= 1;
12787 			n <<= 1;
12788 		}
12789 	}
12790 
12791 	if (n != n2)
12792 		return false;
12793 
12794 	return intel_fuzzy_clock_check(m, m2);
12795 }
12796 
12797 static bool
12798 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12799 		       const struct intel_link_m_n *m2_n2,
12800 		       bool exact)
12801 {
12802 	return m_n->tu == m2_n2->tu &&
12803 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12804 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12805 		intel_compare_m_n(m_n->link_m, m_n->link_n,
12806 				  m2_n2->link_m, m2_n2->link_n, exact);
12807 }
12808 
12809 static bool
12810 intel_compare_infoframe(const union hdmi_infoframe *a,
12811 			const union hdmi_infoframe *b)
12812 {
12813 	return memcmp(a, b, sizeof(*a)) == 0;
12814 }
12815 
12816 static void
12817 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12818 			       bool fastset, const char *name,
12819 			       const union hdmi_infoframe *a,
12820 			       const union hdmi_infoframe *b)
12821 {
12822 	if (fastset) {
12823 		if ((drm_debug & DRM_UT_KMS) == 0)
12824 			return;
12825 
12826 		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
12827 		DRM_DEBUG_KMS("expected:\n");
12828 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12829 		DRM_DEBUG_KMS("found:\n");
12830 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12831 	} else {
12832 		DRM_ERROR("mismatch in %s infoframe\n", name);
12833 		DRM_ERROR("expected:\n");
12834 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12835 		DRM_ERROR("found:\n");
12836 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12837 	}
12838 }
12839 
12840 static void __printf(4, 5)
12841 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
12842 		     const char *name, const char *format, ...)
12843 {
12844 	struct va_format vaf;
12845 	va_list args;
12846 
12847 	va_start(args, format);
12848 	vaf.fmt = format;
12849 	vaf.va = &args;
12850 
12851 	if (fastset)
12852 		DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
12853 			      crtc->base.base.id, crtc->base.name, name, &vaf);
12854 	else
12855 		DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
12856 			  crtc->base.base.id, crtc->base.name, name, &vaf);
12857 
12858 	va_end(args);
12859 }
12860 
12861 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12862 {
12863 	if (i915_modparams.fastboot != -1)
12864 		return i915_modparams.fastboot;
12865 
12866 	/* Enable fastboot by default on Skylake and newer */
12867 	if (INTEL_GEN(dev_priv) >= 9)
12868 		return true;
12869 
12870 	/* Enable fastboot by default on VLV and CHV */
12871 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12872 		return true;
12873 
12874 	/* Disabled by default on all others */
12875 	return false;
12876 }
12877 
12878 static bool
12879 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
12880 			  const struct intel_crtc_state *pipe_config,
12881 			  bool fastset)
12882 {
12883 	struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
12884 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
12885 	bool ret = true;
12886 	u32 bp_gamma = 0;
12887 	bool fixup_inherited = fastset &&
12888 		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12889 		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12890 
12891 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12892 		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12893 		ret = false;
12894 	}
12895 
12896 #define PIPE_CONF_CHECK_X(name) do { \
12897 	if (current_config->name != pipe_config->name) { \
12898 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
12899 				     "(expected 0x%08x, found 0x%08x)", \
12900 				     current_config->name, \
12901 				     pipe_config->name); \
12902 		ret = false; \
12903 	} \
12904 } while (0)
12905 
12906 #define PIPE_CONF_CHECK_I(name) do { \
12907 	if (current_config->name != pipe_config->name) { \
12908 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
12909 				     "(expected %i, found %i)", \
12910 				     current_config->name, \
12911 				     pipe_config->name); \
12912 		ret = false; \
12913 	} \
12914 } while (0)
12915 
12916 #define PIPE_CONF_CHECK_BOOL(name) do { \
12917 	if (current_config->name != pipe_config->name) { \
12918 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
12919 				     "(expected %s, found %s)", \
12920 				     yesno(current_config->name), \
12921 				     yesno(pipe_config->name)); \
12922 		ret = false; \
12923 	} \
12924 } while (0)
12925 
12926 /*
12927  * Checks state where we only read out the enabling, but not the entire
12928  * state itself (like full infoframes or ELD for audio). These states
12929  * require a full modeset on bootup to fix up.
12930  */
12931 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12932 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12933 		PIPE_CONF_CHECK_BOOL(name); \
12934 	} else { \
12935 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
12936 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
12937 				     yesno(current_config->name), \
12938 				     yesno(pipe_config->name)); \
12939 		ret = false; \
12940 	} \
12941 } while (0)
12942 
12943 #define PIPE_CONF_CHECK_P(name) do { \
12944 	if (current_config->name != pipe_config->name) { \
12945 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
12946 				     "(expected %p, found %p)", \
12947 				     current_config->name, \
12948 				     pipe_config->name); \
12949 		ret = false; \
12950 	} \
12951 } while (0)
12952 
12953 #define PIPE_CONF_CHECK_M_N(name) do { \
12954 	if (!intel_compare_link_m_n(&current_config->name, \
12955 				    &pipe_config->name,\
12956 				    !fastset)) { \
12957 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
12958 				     "(expected tu %i gmch %i/%i link %i/%i, " \
12959 				     "found tu %i, gmch %i/%i link %i/%i)", \
12960 				     current_config->name.tu, \
12961 				     current_config->name.gmch_m, \
12962 				     current_config->name.gmch_n, \
12963 				     current_config->name.link_m, \
12964 				     current_config->name.link_n, \
12965 				     pipe_config->name.tu, \
12966 				     pipe_config->name.gmch_m, \
12967 				     pipe_config->name.gmch_n, \
12968 				     pipe_config->name.link_m, \
12969 				     pipe_config->name.link_n); \
12970 		ret = false; \
12971 	} \
12972 } while (0)
12973 
12974 /* This is required for BDW+ where there is only one set of registers for
12975  * switching between high and low RR.
12976  * This macro can be used whenever a comparison has to be made between one
12977  * hw state and multiple sw state variables.
12978  */
12979 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12980 	if (!intel_compare_link_m_n(&current_config->name, \
12981 				    &pipe_config->name, !fastset) && \
12982 	    !intel_compare_link_m_n(&current_config->alt_name, \
12983 				    &pipe_config->name, !fastset)) { \
12984 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
12985 				     "(expected tu %i gmch %i/%i link %i/%i, " \
12986 				     "or tu %i gmch %i/%i link %i/%i, " \
12987 				     "found tu %i, gmch %i/%i link %i/%i)", \
12988 				     current_config->name.tu, \
12989 				     current_config->name.gmch_m, \
12990 				     current_config->name.gmch_n, \
12991 				     current_config->name.link_m, \
12992 				     current_config->name.link_n, \
12993 				     current_config->alt_name.tu, \
12994 				     current_config->alt_name.gmch_m, \
12995 				     current_config->alt_name.gmch_n, \
12996 				     current_config->alt_name.link_m, \
12997 				     current_config->alt_name.link_n, \
12998 				     pipe_config->name.tu, \
12999 				     pipe_config->name.gmch_m, \
13000 				     pipe_config->name.gmch_n, \
13001 				     pipe_config->name.link_m, \
13002 				     pipe_config->name.link_n); \
13003 		ret = false; \
13004 	} \
13005 } while (0)
13006 
13007 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13008 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
13009 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13010 				     "(%x) (expected %i, found %i)", \
13011 				     (mask), \
13012 				     current_config->name & (mask), \
13013 				     pipe_config->name & (mask)); \
13014 		ret = false; \
13015 	} \
13016 } while (0)
13017 
13018 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13019 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13020 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
13021 				     "(expected %i, found %i)", \
13022 				     current_config->name, \
13023 				     pipe_config->name); \
13024 		ret = false; \
13025 	} \
13026 } while (0)
13027 
13028 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13029 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
13030 				     &pipe_config->infoframes.name)) { \
13031 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13032 					       &current_config->infoframes.name, \
13033 					       &pipe_config->infoframes.name); \
13034 		ret = false; \
13035 	} \
13036 } while (0)
13037 
13038 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13039 	if (current_config->name1 != pipe_config->name1) { \
13040 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13041 				"(expected %i, found %i, won't compare lut values)", \
13042 				current_config->name1, \
13043 				pipe_config->name1); \
13044 		ret = false;\
13045 	} else { \
13046 		if (!intel_color_lut_equal(current_config->name2, \
13047 					pipe_config->name2, pipe_config->name1, \
13048 					bit_precision)) { \
13049 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13050 					"hw_state doesn't match sw_state"); \
13051 			ret = false; \
13052 		} \
13053 	} \
13054 } while (0)
13055 
13056 #define PIPE_CONF_QUIRK(quirk) \
13057 	((current_config->quirks | pipe_config->quirks) & (quirk))
13058 
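	/*
	 * Illustrative expansion: PIPE_CONF_CHECK_I(lane_count) compares
	 * current_config->lane_count with pipe_config->lane_count and, on
	 * mismatch, logs "(expected %i, found %i)" via
	 * pipe_config_mismatch() and marks the overall result as false.
	 */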
13059 	PIPE_CONF_CHECK_I(cpu_transcoder);
13060 
13061 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13062 	PIPE_CONF_CHECK_I(fdi_lanes);
13063 	PIPE_CONF_CHECK_M_N(fdi_m_n);
13064 
13065 	PIPE_CONF_CHECK_I(lane_count);
13066 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13067 
13068 	if (INTEL_GEN(dev_priv) < 8) {
13069 		PIPE_CONF_CHECK_M_N(dp_m_n);
13070 
13071 		if (current_config->has_drrs)
13072 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
13073 	} else
13074 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13075 
13076 	PIPE_CONF_CHECK_X(output_types);
13077 
13078 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
13079 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
13080 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
13081 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
13082 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
13083 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
13084 
13085 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
13086 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
13087 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
13088 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
13089 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
13090 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
13091 
13092 	PIPE_CONF_CHECK_I(pixel_multiplier);
13093 	PIPE_CONF_CHECK_I(output_format);
13094 	PIPE_CONF_CHECK_I(dc3co_exitline);
13095 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13096 	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13097 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13098 		PIPE_CONF_CHECK_BOOL(limited_color_range);
13099 
13100 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13101 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13102 	PIPE_CONF_CHECK_BOOL(has_infoframe);
13103 	PIPE_CONF_CHECK_BOOL(fec_enable);
13104 
13105 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13106 
13107 	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13108 			      DRM_MODE_FLAG_INTERLACE);
13109 
13110 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13111 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13112 				      DRM_MODE_FLAG_PHSYNC);
13113 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13114 				      DRM_MODE_FLAG_NHSYNC);
13115 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13116 				      DRM_MODE_FLAG_PVSYNC);
13117 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
13118 				      DRM_MODE_FLAG_NVSYNC);
13119 	}
13120 
13121 	PIPE_CONF_CHECK_X(gmch_pfit.control);
13122 	/* pfit ratios are autocomputed by the hw on gen4+ */
13123 	if (INTEL_GEN(dev_priv) < 4)
13124 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13125 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13126 
13127 	/*
13128 	 * Changing the EDP transcoder input mux
13129 	 * (A_ONOFF vs. A_ON) requires a full modeset.
13130 	 */
13131 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13132 
13133 	if (!fastset) {
13134 		PIPE_CONF_CHECK_I(pipe_src_w);
13135 		PIPE_CONF_CHECK_I(pipe_src_h);
13136 
13137 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13138 		if (current_config->pch_pfit.enabled) {
13139 			PIPE_CONF_CHECK_X(pch_pfit.pos);
13140 			PIPE_CONF_CHECK_X(pch_pfit.size);
13141 		}
13142 
13143 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13144 		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13145 
13146 		PIPE_CONF_CHECK_X(gamma_mode);
13147 		if (IS_CHERRYVIEW(dev_priv))
13148 			PIPE_CONF_CHECK_X(cgm_mode);
13149 		else
13150 			PIPE_CONF_CHECK_X(csc_mode);
13151 		PIPE_CONF_CHECK_BOOL(gamma_enable);
13152 		PIPE_CONF_CHECK_BOOL(csc_enable);
13153 
13154 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13155 		if (bp_gamma)
13156 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);
13157 
13158 	}
13159 
13160 	PIPE_CONF_CHECK_BOOL(double_wide);
13161 
13162 	PIPE_CONF_CHECK_P(shared_dpll);
13163 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13164 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13165 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13166 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13167 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13168 	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13169 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13170 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13171 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13172 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13173 	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13174 	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13175 	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13176 	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13177 	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13178 	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13179 	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13180 	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13181 	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13182 	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13183 	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13184 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13185 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13186 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13187 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13188 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13189 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13190 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13191 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13192 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13193 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13194 
13195 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13196 	PIPE_CONF_CHECK_X(dsi_pll.div);
13197 
13198 	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13199 		PIPE_CONF_CHECK_I(pipe_bpp);
13200 
13201 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
13202 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13203 
13204 	PIPE_CONF_CHECK_I(min_voltage_level);
13205 
13206 	PIPE_CONF_CHECK_X(infoframes.enable);
13207 	PIPE_CONF_CHECK_X(infoframes.gcp);
13208 	PIPE_CONF_CHECK_INFOFRAME(avi);
13209 	PIPE_CONF_CHECK_INFOFRAME(spd);
13210 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
13211 	PIPE_CONF_CHECK_INFOFRAME(drm);
13212 
13213 	PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
13214 	PIPE_CONF_CHECK_I(master_transcoder);
13215 
13216 #undef PIPE_CONF_CHECK_X
13217 #undef PIPE_CONF_CHECK_I
13218 #undef PIPE_CONF_CHECK_BOOL
13219 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13220 #undef PIPE_CONF_CHECK_P
13221 #undef PIPE_CONF_CHECK_FLAGS
13222 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13223 #undef PIPE_CONF_CHECK_COLOR_LUT
13224 #undef PIPE_CONF_QUIRK
13225 
13226 	return ret;
13227 }
13228 
13229 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13230 					   const struct intel_crtc_state *pipe_config)
13231 {
13232 	if (pipe_config->has_pch_encoder) {
13233 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13234 							    &pipe_config->fdi_m_n);
13235 		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
13236 
13237 		/*
13238 		 * FDI already provided one idea for the dotclock.
13239 		 * Yell if the encoder disagrees.
13240 		 */
13241 		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13242 		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13243 		     fdi_dotclock, dotclock);
13244 	}
13245 }
13246 
13247 static void verify_wm_state(struct intel_crtc *crtc,
13248 			    struct intel_crtc_state *new_crtc_state)
13249 {
13250 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13251 	struct skl_hw_state {
13252 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
13253 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
13254 		struct skl_ddb_allocation ddb;
13255 		struct skl_pipe_wm wm;
13256 	} *hw;
13257 	struct skl_ddb_allocation *sw_ddb;
13258 	struct skl_pipe_wm *sw_wm;
13259 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
13260 	const enum pipe pipe = crtc->pipe;
13261 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
13262 
13263 	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
13264 		return;
13265 
13266 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
13267 	if (!hw)
13268 		return;
13269 
13270 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
13271 	sw_wm = &new_crtc_state->wm.skl.optimal;
13272 
13273 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
13274 
13275 	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
13276 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
13277 
13278 	if (INTEL_GEN(dev_priv) >= 11 &&
13279 	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
13280 		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
13281 			  sw_ddb->enabled_slices,
13282 			  hw->ddb.enabled_slices);
13283 
13284 	/* planes */
13285 	for_each_universal_plane(dev_priv, pipe, plane) {
13286 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13287 
13288 		hw_plane_wm = &hw->wm.planes[plane];
13289 		sw_plane_wm = &sw_wm->planes[plane];
13290 
13291 		/* Watermarks */
13292 		for (level = 0; level <= max_level; level++) {
13293 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13294 						&sw_plane_wm->wm[level]))
13295 				continue;
13296 
13297 			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13298 				  pipe_name(pipe), plane + 1, level,
13299 				  sw_plane_wm->wm[level].plane_en,
13300 				  sw_plane_wm->wm[level].plane_res_b,
13301 				  sw_plane_wm->wm[level].plane_res_l,
13302 				  hw_plane_wm->wm[level].plane_en,
13303 				  hw_plane_wm->wm[level].plane_res_b,
13304 				  hw_plane_wm->wm[level].plane_res_l);
13305 		}
13306 
13307 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13308 					 &sw_plane_wm->trans_wm)) {
13309 			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13310 				  pipe_name(pipe), plane + 1,
13311 				  sw_plane_wm->trans_wm.plane_en,
13312 				  sw_plane_wm->trans_wm.plane_res_b,
13313 				  sw_plane_wm->trans_wm.plane_res_l,
13314 				  hw_plane_wm->trans_wm.plane_en,
13315 				  hw_plane_wm->trans_wm.plane_res_b,
13316 				  hw_plane_wm->trans_wm.plane_res_l);
13317 		}
13318 
13319 		/* DDB */
13320 		hw_ddb_entry = &hw->ddb_y[plane];
13321 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
13322 
13323 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13324 			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
13325 				  pipe_name(pipe), plane + 1,
13326 				  sw_ddb_entry->start, sw_ddb_entry->end,
13327 				  hw_ddb_entry->start, hw_ddb_entry->end);
13328 		}
13329 	}
13330 
13331 	/*
13332 	 * cursor
13333 	 * If the cursor plane isn't active, we may not have updated its ddb
13334 	 * allocation. In that case, since the ddb allocation will be updated
13335 	 * once the plane becomes visible, we can skip this check.
13336 	 */
13337 	if (1) {
13338 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
13339 
13340 		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
13341 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
13342 
13343 		/* Watermarks */
13344 		for (level = 0; level <= max_level; level++) {
13345 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
13346 						&sw_plane_wm->wm[level]))
13347 				continue;
13348 
13349 			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13350 				  pipe_name(pipe), level,
13351 				  sw_plane_wm->wm[level].plane_en,
13352 				  sw_plane_wm->wm[level].plane_res_b,
13353 				  sw_plane_wm->wm[level].plane_res_l,
13354 				  hw_plane_wm->wm[level].plane_en,
13355 				  hw_plane_wm->wm[level].plane_res_b,
13356 				  hw_plane_wm->wm[level].plane_res_l);
13357 		}
13358 
13359 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
13360 					 &sw_plane_wm->trans_wm)) {
13361 			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
13362 				  pipe_name(pipe),
13363 				  sw_plane_wm->trans_wm.plane_en,
13364 				  sw_plane_wm->trans_wm.plane_res_b,
13365 				  sw_plane_wm->trans_wm.plane_res_l,
13366 				  hw_plane_wm->trans_wm.plane_en,
13367 				  hw_plane_wm->trans_wm.plane_res_b,
13368 				  hw_plane_wm->trans_wm.plane_res_l);
13369 		}
13370 
13371 		/* DDB */
13372 		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
13373 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
13374 
13375 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
13376 			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
13377 				  pipe_name(pipe),
13378 				  sw_ddb_entry->start, sw_ddb_entry->end,
13379 				  hw_ddb_entry->start, hw_ddb_entry->end);
13380 		}
13381 	}
13382 
13383 	kfree(hw);
13384 }
13385 
13386 static void
13387 verify_connector_state(struct intel_atomic_state *state,
13388 		       struct intel_crtc *crtc)
13389 {
13390 	struct drm_connector *connector;
13391 	struct drm_connector_state *new_conn_state;
13392 	int i;
13393 
13394 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
13395 		struct drm_encoder *encoder = connector->encoder;
13396 		struct intel_crtc_state *crtc_state = NULL;
13397 
13398 		if (new_conn_state->crtc != &crtc->base)
13399 			continue;
13400 
13401 		if (crtc)
13402 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
13403 
13404 		intel_connector_verify_state(crtc_state, new_conn_state);
13405 
13406 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
13407 		     "connector's atomic encoder doesn't match legacy encoder\n");
13408 	}
13409 }
13410 
13411 static void
13412 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
13413 {
13414 	struct intel_encoder *encoder;
13415 	struct drm_connector *connector;
13416 	struct drm_connector_state *old_conn_state, *new_conn_state;
13417 	int i;
13418 
13419 	for_each_intel_encoder(&dev_priv->drm, encoder) {
13420 		bool enabled = false, found = false;
13421 		enum pipe pipe;
13422 
13423 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13424 			      encoder->base.base.id,
13425 			      encoder->base.name);
13426 
13427 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
13428 						   new_conn_state, i) {
13429 			if (old_conn_state->best_encoder == &encoder->base)
13430 				found = true;
13431 
13432 			if (new_conn_state->best_encoder != &encoder->base)
13433 				continue;
13434 			found = enabled = true;
13435 
13436 			I915_STATE_WARN(new_conn_state->crtc !=
13437 					encoder->base.crtc,
13438 			     "connector's crtc doesn't match encoder crtc\n");
13439 		}
13440 
13441 		if (!found)
13442 			continue;
13443 
13444 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
13445 		     "encoder's enabled state mismatch "
13446 		     "(expected %i, found %i)\n",
13447 		     !!encoder->base.crtc, enabled);
13448 
13449 		if (!encoder->base.crtc) {
13450 			bool active;
13451 
13452 			active = encoder->get_hw_state(encoder, &pipe);
13453 			I915_STATE_WARN(active,
13454 			     "encoder detached but still enabled on pipe %c.\n",
13455 			     pipe_name(pipe));
13456 		}
13457 	}
13458 }
13459 
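/*
 * Read the pipe configuration back from the hardware and compare it
 * against the computed software state. Note that old_crtc_state is
 * destroyed and reused as scratch space for the hardware readout, so
 * it must not be relied upon by the caller afterwards.
 */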
13460 static void
13461 verify_crtc_state(struct intel_crtc *crtc,
13462 		  struct intel_crtc_state *old_crtc_state,
13463 		  struct intel_crtc_state *new_crtc_state)
13464 {
13465 	struct drm_device *dev = crtc->base.dev;
13466 	struct drm_i915_private *dev_priv = to_i915(dev);
13467 	struct intel_encoder *encoder;
13468 	struct intel_crtc_state *pipe_config;
13469 	struct drm_atomic_state *state;
13470 	bool active;
13471 
13472 	state = old_crtc_state->base.state;
13473 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
13474 	pipe_config = old_crtc_state;
13475 	memset(pipe_config, 0, sizeof(*pipe_config));
13476 	pipe_config->base.crtc = &crtc->base;
13477 	pipe_config->base.state = state;
13478 
13479 	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
13480 
13481 	active = dev_priv->display.get_pipe_config(crtc, pipe_config);
13482 
13483 	/* we keep both pipes enabled on 830 */
13484 	if (IS_I830(dev_priv))
13485 		active = new_crtc_state->base.active;
13486 
13487 	I915_STATE_WARN(new_crtc_state->base.active != active,
13488 	     "crtc active state doesn't match with hw state "
13489 	     "(expected %i, found %i)\n", new_crtc_state->base.active, active);
13490 
13491 	I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
13492 	     "transitional active state does not match atomic hw state "
13493 	     "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
13494 
13495 	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
13496 		enum pipe pipe;
13497 
13498 		active = encoder->get_hw_state(encoder, &pipe);
13499 		I915_STATE_WARN(active != new_crtc_state->base.active,
13500 			"[ENCODER:%i] active %i with crtc active %i\n",
13501 			encoder->base.base.id, active, new_crtc_state->base.active);
13502 
13503 		I915_STATE_WARN(active && crtc->pipe != pipe,
13504 				"Encoder connected to wrong pipe %c\n",
13505 				pipe_name(pipe));
13506 
13507 		if (active)
13508 			encoder->get_config(encoder, pipe_config);
13509 	}
13510 
13511 	intel_crtc_compute_pixel_rate(pipe_config);
13512 
13513 	if (!new_crtc_state->base.active)
13514 		return;
13515 
13516 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
13517 
13518 	if (!intel_pipe_config_compare(new_crtc_state,
13519 				       pipe_config, false)) {
13520 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
13521 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
13522 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
13523 	}
13524 }
13525 
13526 static void
13527 intel_verify_planes(struct intel_atomic_state *state)
13528 {
13529 	struct intel_plane *plane;
13530 	const struct intel_plane_state *plane_state;
13531 	int i;
13532 
13533 	for_each_new_intel_plane_in_state(state, plane,
13534 					  plane_state, i)
13535 		assert_plane(plane, plane_state->planar_slave ||
13536 			     plane_state->base.visible);
13537 }
13538 
13539 static void
13540 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13541 			 struct intel_shared_dpll *pll,
13542 			 struct intel_crtc *crtc,
13543 			 struct intel_crtc_state *new_crtc_state)
13544 {
13545 	struct intel_dpll_hw_state dpll_hw_state;
13546 	unsigned int crtc_mask;
13547 	bool active;
13548 
13549 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13550 
13551 	DRM_DEBUG_KMS("%s\n", pll->info->name);
13552 
13553 	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
13554 
13555 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13556 		I915_STATE_WARN(!pll->on && pll->active_mask,
13557 		     "pll in active use but not on in sw tracking\n");
13558 		I915_STATE_WARN(pll->on && !pll->active_mask,
13559 		     "pll is on but not used by any active crtc\n");
13560 		I915_STATE_WARN(pll->on != active,
13561 		     "pll on state mismatch (expected %i, found %i)\n",
13562 		     pll->on, active);
13563 	}
13564 
13565 	if (!crtc) {
13566 		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13567 				"more active pll users than references: %x vs %x\n",
13568 				pll->active_mask, pll->state.crtc_mask);
13569 
13570 		return;
13571 	}
13572 
13573 	crtc_mask = drm_crtc_mask(&crtc->base);
13574 
13575 	if (new_crtc_state->base.active)
13576 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13577 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13578 				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13579 	else
13580 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13581 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13582 				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
13583 
13584 	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13585 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13586 			crtc_mask, pll->state.crtc_mask);
13587 
13588 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13589 					  &dpll_hw_state,
13590 					  sizeof(dpll_hw_state)),
13591 			"pll hw state mismatch\n");
13592 }
13593 
13594 static void
13595 verify_shared_dpll_state(struct intel_crtc *crtc,
13596 			 struct intel_crtc_state *old_crtc_state,
13597 			 struct intel_crtc_state *new_crtc_state)
13598 {
13599 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13600 
13601 	if (new_crtc_state->shared_dpll)
13602 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13603 
13604 	if (old_crtc_state->shared_dpll &&
13605 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13606 		unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13607 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13608 
13609 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13610 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
13611 				pipe_name(drm_crtc_index(&crtc->base)));
13612 		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
				pipe_name(drm_crtc_index(&crtc->base)));
13615 	}
13616 }
13617 
13618 static void
13619 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13620 			  struct intel_atomic_state *state,
13621 			  struct intel_crtc_state *old_crtc_state,
13622 			  struct intel_crtc_state *new_crtc_state)
13623 {
13624 	if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13625 		return;
13626 
13627 	verify_wm_state(crtc, new_crtc_state);
13628 	verify_connector_state(state, crtc);
13629 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13630 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13631 }
13632 
13633 static void
13634 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13635 {
13636 	int i;
13637 
13638 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
13639 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13640 }
13641 
13642 static void
13643 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
13644 			      struct intel_atomic_state *state)
13645 {
13646 	verify_encoder_state(dev_priv, state);
13647 	verify_connector_state(state, NULL);
13648 	verify_disabled_dpll_state(dev_priv);
13649 }
13650 
13651 static void
13652 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
13653 {
13654 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13655 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13656 	const struct drm_display_mode *adjusted_mode =
13657 		&crtc_state->base.adjusted_mode;
13658 
13659 	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
13660 
13661 	/*
13662 	 * The scanline counter increments at the leading edge of hsync.
13663 	 *
13664 	 * On most platforms it starts counting from vtotal-1 on the
13665 	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. I.e. just after
13667 	 * start of vblank, which also occurs at start of hsync (on the
13668 	 * last active line), the scanline counter will read vblank_start-1.
13669 	 *
13670 	 * On gen2 the scanline counter starts counting from 1 instead
13671 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13672 	 * to keep the value positive), instead of adding one.
13673 	 *
13674 	 * On HSW+ the behaviour of the scanline counter depends on the output
13675 	 * type. For DP ports it behaves like most other platforms, but on HDMI
13676 	 * there's an extra 1 line difference. So we need to add two instead of
13677 	 * one to the value.
13678 	 *
13679 	 * On VLV/CHV DSI the scanline counter would appear to increment
13680 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13681 	 * that means we can't tell whether we're in vblank or not while
13682 	 * we're on that particular line. We must still set scanline_offset
13683 	 * to 1 so that the vblank timestamps come out correct when we query
13684 	 * the scanline counter from within the vblank interrupt handler.
13685 	 * However if queried just before the start of vblank we'll get an
13686 	 * answer that's slightly in the future.
13687 	 */
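	/*
	 * Worked example (hypothetical mode with vtotal = 1125, assuming
	 * the readout elsewhere computes
	 * (counter + scanline_offset) % vtotal): on the first active line
	 * most platforms read vtotal-1 = 1124, so an offset of 1 yields
	 * (1124 + 1) % 1125 = 0; gen2 reads 1 on that same line, so an
	 * offset of vtotal-1 = 1124 yields the identical
	 * (1 + 1124) % 1125 = 0.
	 */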
13688 	if (IS_GEN(dev_priv, 2)) {
13689 		int vtotal;
13690 
13691 		vtotal = adjusted_mode->crtc_vtotal;
13692 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13693 			vtotal /= 2;
13694 
13695 		crtc->scanline_offset = vtotal - 1;
13696 	} else if (HAS_DDI(dev_priv) &&
13697 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13698 		crtc->scanline_offset = 2;
13699 	} else {
13700 		crtc->scanline_offset = 1;
13701 	}
13702 }
13703 
13704 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13705 {
13706 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13707 	struct intel_crtc_state *new_crtc_state;
13708 	struct intel_crtc *crtc;
13709 	int i;
13710 
13711 	if (!dev_priv->display.crtc_compute_clock)
13712 		return;
13713 
13714 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
13715 		if (!needs_modeset(new_crtc_state))
13716 			continue;
13717 
13718 		intel_release_shared_dplls(state, crtc);
13719 	}
13720 }
13721 
13722 /*
13723  * This implements the workaround described in the "notes" section of the mode
13724  * set sequence documentation. When going from no pipes or single pipe to
13725  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13726  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13727  */
13728 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13729 {
13730 	struct intel_crtc_state *crtc_state;
13731 	struct intel_crtc *crtc;
13732 	struct intel_crtc_state *first_crtc_state = NULL;
13733 	struct intel_crtc_state *other_crtc_state = NULL;
13734 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13735 	int i;
13736 
	/* look at all crtcs that are going to be enabled during the modeset */
13738 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13739 		if (!crtc_state->base.active ||
13740 		    !needs_modeset(crtc_state))
13741 			continue;
13742 
13743 		if (first_crtc_state) {
13744 			other_crtc_state = crtc_state;
13745 			break;
13746 		} else {
13747 			first_crtc_state = crtc_state;
13748 			first_pipe = crtc->pipe;
13749 		}
13750 	}
13751 
13752 	/* No workaround needed? */
13753 	if (!first_crtc_state)
13754 		return 0;
13755 
	/* w/a possibly needed, check how many crtcs are already enabled. */
13757 	for_each_intel_crtc(state->base.dev, crtc) {
13758 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13759 		if (IS_ERR(crtc_state))
13760 			return PTR_ERR(crtc_state);
13761 
13762 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13763 
13764 		if (!crtc_state->base.active ||
13765 		    needs_modeset(crtc_state))
13766 			continue;
13767 
13768 		/* 2 or more enabled crtcs means no need for w/a */
13769 		if (enabled_pipe != INVALID_PIPE)
13770 			return 0;
13771 
13772 		enabled_pipe = crtc->pipe;
13773 	}
13774 
13775 	if (enabled_pipe != INVALID_PIPE)
13776 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13777 	else if (other_crtc_state)
13778 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13779 
13780 	return 0;
13781 }
13782 
13783 static int intel_modeset_checks(struct intel_atomic_state *state)
13784 {
13785 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13786 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13787 	struct intel_crtc *crtc;
13788 	int ret, i;
13789 
13790 	/* keep the current setting */
13791 	if (!state->cdclk.force_min_cdclk_changed)
13792 		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
13793 
13794 	state->modeset = true;
13795 	state->active_pipes = dev_priv->active_pipes;
13796 	state->cdclk.logical = dev_priv->cdclk.logical;
13797 	state->cdclk.actual = dev_priv->cdclk.actual;
13798 
13799 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13800 					    new_crtc_state, i) {
13801 		if (new_crtc_state->base.active)
13802 			state->active_pipes |= BIT(crtc->pipe);
13803 		else
13804 			state->active_pipes &= ~BIT(crtc->pipe);
13805 
13806 		if (old_crtc_state->base.active != new_crtc_state->base.active)
13807 			state->active_pipe_changes |= BIT(crtc->pipe);
13808 	}
13809 
13810 	if (state->active_pipe_changes) {
13811 		ret = intel_atomic_lock_global_state(state);
13812 		if (ret)
13813 			return ret;
13814 	}
13815 
13816 	ret = intel_modeset_calc_cdclk(state);
13817 	if (ret)
13818 		return ret;
13819 
13820 	intel_modeset_clear_plls(state);
13821 
13822 	if (IS_HASWELL(dev_priv))
13823 		return haswell_mode_set_planes_workaround(state);
13824 
13825 	return 0;
13826 }
13827 
13828 /*
13829  * Handle calculation of various watermark data at the end of the atomic check
13830  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13831  * handlers to ensure that all derived state has been updated.
13832  */
13833 static int calc_watermark_data(struct intel_atomic_state *state)
13834 {
13835 	struct drm_device *dev = state->base.dev;
13836 	struct drm_i915_private *dev_priv = to_i915(dev);
13837 
13838 	/* Is there platform-specific watermark information to calculate? */
13839 	if (dev_priv->display.compute_global_watermarks)
13840 		return dev_priv->display.compute_global_watermarks(state);
13841 
13842 	return 0;
13843 }
13844 
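/*
 * If the new state compares as equivalent under the fuzzy (fastset)
 * comparison, demote the full modeset to a fastset by clearing
 * mode_changed and setting update_pipe instead.
 */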
13845 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13846 				     struct intel_crtc_state *new_crtc_state)
13847 {
13848 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
13849 		return;
13850 
13851 	new_crtc_state->base.mode_changed = false;
13852 	new_crtc_state->update_pipe = true;
13853 
13854 	/*
13855 	 * If we're not doing the full modeset we want to
13856 	 * keep the current M/N values as they may be
13857 	 * sufficiently different to the computed values
13858 	 * to cause problems.
13859 	 *
13860 	 * FIXME: should really copy more fuzzy state here
13861 	 */
13862 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
13863 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
13864 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
13865 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
13866 }
13867 
13868 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
13869 					  struct intel_crtc *crtc,
13870 					  u8 plane_ids_mask)
13871 {
13872 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13873 	struct intel_plane *plane;
13874 
13875 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
13876 		struct intel_plane_state *plane_state;
13877 
13878 		if ((plane_ids_mask & BIT(plane->id)) == 0)
13879 			continue;
13880 
13881 		plane_state = intel_atomic_get_plane_state(state, plane);
13882 		if (IS_ERR(plane_state))
13883 			return PTR_ERR(plane_state);
13884 	}
13885 
13886 	return 0;
13887 }
13888 
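/*
 * On the platforms listed below the plane ratio (see the
 * {hsw,vlv,ivb}_plane_ratio() helpers) ties a plane's minimum cdclk to
 * how many planes are active, so enabling or disabling one plane can
 * change the cdclk requirements of the other planes on the same pipe.
 */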
13889 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
13890 {
13891 	/* See {hsw,vlv,ivb}_plane_ratio() */
13892 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
13893 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
13894 		IS_IVYBRIDGE(dev_priv);
13895 }
13896 
13897 static int intel_atomic_check_planes(struct intel_atomic_state *state,
13898 				     bool *need_modeset)
13899 {
13900 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13901 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13902 	struct intel_plane_state *plane_state;
13903 	struct intel_plane *plane;
13904 	struct intel_crtc *crtc;
13905 	int i, ret;
13906 
13907 	ret = icl_add_linked_planes(state);
13908 	if (ret)
13909 		return ret;
13910 
13911 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
13912 		ret = intel_plane_atomic_check(state, plane);
13913 		if (ret) {
13914 			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
13915 					 plane->base.base.id, plane->base.name);
13916 			return ret;
13917 		}
13918 	}
13919 
13920 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13921 					    new_crtc_state, i) {
13922 		u8 old_active_planes, new_active_planes;
13923 
13924 		ret = icl_check_nv12_planes(new_crtc_state);
13925 		if (ret)
13926 			return ret;
13927 
13928 		/*
13929 		 * On some platforms the number of active planes affects
13930 		 * the planes' minimum cdclk calculation. Add such planes
13931 		 * to the state before we compute the minimum cdclk.
13932 		 */
13933 		if (!active_planes_affects_min_cdclk(dev_priv))
13934 			continue;
13935 
13936 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
13937 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
13938 
13939 		if (hweight8(old_active_planes) == hweight8(new_active_planes))
13940 			continue;
13941 
13942 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
13943 		if (ret)
13944 			return ret;
13945 	}
13946 
13947 	/*
13948 	 * active_planes bitmask has been updated, and potentially
13949 	 * affected planes are part of the state. We can now
13950 	 * compute the minimum cdclk for each plane.
13951 	 */
13952 	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
13953 		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);
13954 
13955 	return 0;
13956 }
13957 
13958 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
13959 {
13960 	struct intel_crtc_state *crtc_state;
13961 	struct intel_crtc *crtc;
13962 	int i;
13963 
13964 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13965 		int ret = intel_crtc_atomic_check(state, crtc);
13966 		if (ret) {
13967 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
13968 					 crtc->base.base.id, crtc->base.name);
13969 			return ret;
13970 		}
13971 	}
13972 
13973 	return 0;
13974 }
13975 
13976 /**
13977  * intel_atomic_check - validate state object
13978  * @dev: drm device
13979  * @_state: state to validate
13980  */
13981 static int intel_atomic_check(struct drm_device *dev,
13982 			      struct drm_atomic_state *_state)
13983 {
13984 	struct drm_i915_private *dev_priv = to_i915(dev);
13985 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
13986 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13987 	struct intel_crtc *crtc;
13988 	int ret, i;
13989 	bool any_ms = false;
13990 
13991 	/* Catch I915_MODE_FLAG_INHERITED */
13992 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13993 					    new_crtc_state, i) {
13994 		if (new_crtc_state->base.mode.private_flags !=
13995 		    old_crtc_state->base.mode.private_flags)
13996 			new_crtc_state->base.mode_changed = true;
13997 	}
13998 
13999 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
14000 	if (ret)
14001 		goto fail;
14002 
14003 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14004 					    new_crtc_state, i) {
14005 		if (!needs_modeset(new_crtc_state))
14006 			continue;
14007 
14008 		if (!new_crtc_state->base.enable) {
14009 			any_ms = true;
14010 			continue;
14011 		}
14012 
14013 		ret = intel_modeset_pipe_config(new_crtc_state);
14014 		if (ret)
14015 			goto fail;
14016 
14017 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14018 
14019 		if (needs_modeset(new_crtc_state))
14020 			any_ms = true;
14021 	}
14022 
14023 	if (any_ms && !check_digital_port_conflicts(state)) {
14024 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		ret = -EINVAL;
14026 		goto fail;
14027 	}
14028 
14029 	ret = drm_dp_mst_atomic_check(&state->base);
14030 	if (ret)
14031 		goto fail;
14032 
14033 	any_ms |= state->cdclk.force_min_cdclk_changed;
14034 
14035 	ret = intel_atomic_check_planes(state, &any_ms);
14036 	if (ret)
14037 		goto fail;
14038 
14039 	if (any_ms) {
14040 		ret = intel_modeset_checks(state);
14041 		if (ret)
14042 			goto fail;
14043 	} else {
14044 		state->cdclk.logical = dev_priv->cdclk.logical;
14045 	}
14046 
14047 	ret = intel_atomic_check_crtcs(state);
14048 	if (ret)
14049 		goto fail;
14050 
14051 	intel_fbc_choose_crtc(dev_priv, state);
14052 	ret = calc_watermark_data(state);
14053 	if (ret)
14054 		goto fail;
14055 
14056 	ret = intel_bw_atomic_check(state);
14057 	if (ret)
14058 		goto fail;
14059 
14060 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14061 					    new_crtc_state, i) {
14062 		if (!needs_modeset(new_crtc_state) &&
14063 		    !new_crtc_state->update_pipe)
14064 			continue;
14065 
14066 		intel_dump_pipe_config(new_crtc_state, state,
14067 				       needs_modeset(new_crtc_state) ?
14068 				       "[modeset]" : "[fastset]");
14069 	}
14070 
14071 	return 0;
14072 
14073  fail:
14074 	if (ret == -EDEADLK)
14075 		return ret;
14076 
14077 	/*
14078 	 * FIXME would probably be nice to know which crtc specifically
14079 	 * caused the failure, in cases where we can pinpoint it.
14080 	 */
14081 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14082 					    new_crtc_state, i)
14083 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14084 
14085 	return ret;
14086 }
14087 
14088 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14089 {
14090 	return drm_atomic_helper_prepare_planes(state->base.dev,
14091 						&state->base);
14092 }
14093 
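/*
 * Return the current vblank count for @crtc. When the hardware doesn't
 * provide a usable counter (max_vblank_count == 0) we fall back to the
 * timestamp-based software count maintained by the drm core.
 */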
14094 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
14095 {
14096 	struct drm_device *dev = crtc->base.dev;
14097 	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
14098 
14099 	if (!vblank->max_vblank_count)
14100 		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
14101 
14102 	return crtc->base.funcs->get_vblank_counter(&crtc->base);
14103 }
14104 
14105 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14106 				  struct intel_crtc_state *crtc_state)
14107 {
14108 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14109 
14110 	if (!IS_GEN(dev_priv, 2))
14111 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14112 
14113 	if (crtc_state->has_pch_encoder) {
14114 		enum pipe pch_transcoder =
14115 			intel_crtc_pch_transcoder(crtc);
14116 
14117 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14118 	}
14119 }
14120 
14121 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
14122 			       const struct intel_crtc_state *new_crtc_state)
14123 {
14124 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
14125 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14126 
14127 	/*
14128 	 * Update pipe size and adjust fitter if needed: the reason for this is
14129 	 * that in compute_mode_changes we check the native mode (not the pfit
14130 	 * mode) to see if we can flip rather than do a full mode set. In the
14131 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
14132 	 * pfit state, we'll end up with a big fb scanned out into the wrong
14133 	 * sized surface.
14134 	 */
14135 	intel_set_pipe_src_size(new_crtc_state);
14136 
14137 	/* on skylake this is done by detaching scalers */
14138 	if (INTEL_GEN(dev_priv) >= 9) {
14139 		skl_detach_scalers(new_crtc_state);
14140 
14141 		if (new_crtc_state->pch_pfit.enabled)
14142 			skylake_pfit_enable(new_crtc_state);
14143 	} else if (HAS_PCH_SPLIT(dev_priv)) {
14144 		if (new_crtc_state->pch_pfit.enabled)
14145 			ironlake_pfit_enable(new_crtc_state);
14146 		else if (old_crtc_state->pch_pfit.enabled)
14147 			ironlake_pfit_disable(old_crtc_state);
14148 	}
14149 
14150 	if (INTEL_GEN(dev_priv) >= 11)
14151 		icl_set_pipe_chicken(crtc);
14152 }
14153 
14154 static void commit_pipe_config(struct intel_atomic_state *state,
14155 			       struct intel_crtc_state *old_crtc_state,
14156 			       struct intel_crtc_state *new_crtc_state)
14157 {
14158 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14159 	bool modeset = needs_modeset(new_crtc_state);
14160 
14161 	/*
	 * During modesets the pipe configuration was already programmed
	 * when the CRTC was enabled, so only fastsets need to update it here.
14164 	 */
14165 	if (!modeset) {
14166 		if (new_crtc_state->base.color_mgmt_changed ||
14167 		    new_crtc_state->update_pipe)
14168 			intel_color_commit(new_crtc_state);
14169 
14170 		if (INTEL_GEN(dev_priv) >= 9)
14171 			skl_detach_scalers(new_crtc_state);
14172 
14173 		if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14174 			bdw_set_pipemisc(new_crtc_state);
14175 
14176 		if (new_crtc_state->update_pipe)
14177 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
14178 	}
14179 
14180 	if (dev_priv->display.atomic_update_watermarks)
14181 		dev_priv->display.atomic_update_watermarks(state,
14182 							   new_crtc_state);
14183 }
14184 
14185 static void intel_update_crtc(struct intel_crtc *crtc,
14186 			      struct intel_atomic_state *state,
14187 			      struct intel_crtc_state *old_crtc_state,
14188 			      struct intel_crtc_state *new_crtc_state)
14189 {
14190 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14191 	bool modeset = needs_modeset(new_crtc_state);
14192 	struct intel_plane_state *new_plane_state =
14193 		intel_atomic_get_new_plane_state(state,
14194 						 to_intel_plane(crtc->base.primary));
14195 
14196 	if (modeset) {
14197 		intel_crtc_update_active_timings(new_crtc_state);
14198 
14199 		dev_priv->display.crtc_enable(new_crtc_state, state);
14200 
14201 		/* vblanks work again, re-enable pipe CRC. */
14202 		intel_crtc_enable_pipe_crc(crtc);
14203 	} else {
14204 		intel_pre_plane_update(old_crtc_state, new_crtc_state);
14205 
14206 		if (new_crtc_state->update_pipe)
14207 			intel_encoders_update_pipe(crtc, new_crtc_state, state);
14208 	}
14209 
14210 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
14211 		intel_fbc_disable(crtc);
14212 	else if (new_plane_state)
14213 		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
14214 
14215 	/* Perform vblank evasion around commit operation */
14216 	intel_pipe_update_start(new_crtc_state);
14217 
14218 	commit_pipe_config(state, old_crtc_state, new_crtc_state);
14219 
14220 	if (INTEL_GEN(dev_priv) >= 9)
14221 		skl_update_planes_on_crtc(state, crtc);
14222 	else
14223 		i9xx_update_planes_on_crtc(state, crtc);
14224 
14225 	intel_pipe_update_end(new_crtc_state);
14226 
14227 	/*
14228 	 * We usually enable FIFO underrun interrupts as part of the
14229 	 * CRTC enable sequence during modesets.  But when we inherit a
14230 	 * valid pipe configuration from the BIOS we need to take care
14231 	 * of enabling them on the CRTC's first fastset.
14232 	 */
14233 	if (new_crtc_state->update_pipe && !modeset &&
14234 	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14235 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14236 }
14237 
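/*
 * Look up the slave CRTC for a port sync master. Note the assumption
 * encoded in the cast below: the slave transcoder index maps 1:1 onto a
 * pipe, which holds for the pipe-attached transcoders that transcoder
 * port sync uses.
 */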
14238 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
14239 {
14240 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev);
14241 	enum transcoder slave_transcoder;
14242 
14243 	WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
14244 
14245 	slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
14246 	return intel_get_crtc_for_pipe(dev_priv,
14247 				       (enum pipe)slave_transcoder);
14248 }
14249 
14250 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
14251 					  struct intel_crtc_state *old_crtc_state,
14252 					  struct intel_crtc_state *new_crtc_state,
14253 					  struct intel_crtc *crtc)
14254 {
14255 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14256 
14257 	intel_crtc_disable_planes(state, crtc);
14258 
14259 	/*
14260 	 * We need to disable pipe CRC before disabling the pipe,
14261 	 * or we race against vblank off.
14262 	 */
14263 	intel_crtc_disable_pipe_crc(crtc);
14264 
14265 	dev_priv->display.crtc_disable(old_crtc_state, state);
14266 	crtc->active = false;
14267 	intel_fbc_disable(crtc);
14268 	intel_disable_shared_dpll(old_crtc_state);
14269 
14270 	/*
14271 	 * Underruns don't always raise interrupts,
14272 	 * so check manually.
14273 	 */
14274 	intel_check_cpu_fifo_underruns(dev_priv);
14275 	intel_check_pch_fifo_underruns(dev_priv);
14276 
14277 	/* FIXME unify this for all platforms */
14278 	if (!new_crtc_state->base.active &&
14279 	    !HAS_GMCH(dev_priv) &&
14280 	    dev_priv->display.initial_watermarks)
14281 		dev_priv->display.initial_watermarks(state,
14282 						     new_crtc_state);
14283 }
14284 
14285 static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
14286 						   struct intel_crtc *crtc,
14287 						   struct intel_crtc_state *old_crtc_state,
14288 						   struct intel_crtc_state *new_crtc_state)
14289 {
14290 	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
14291 	struct intel_crtc_state *new_slave_crtc_state =
14292 		intel_atomic_get_new_crtc_state(state, slave_crtc);
14293 	struct intel_crtc_state *old_slave_crtc_state =
14294 		intel_atomic_get_old_crtc_state(state, slave_crtc);
14295 
14296 	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
14297 		!old_slave_crtc_state);
14298 
14299 	/* Disable Slave first */
14300 	intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
14301 	if (old_slave_crtc_state->base.active)
14302 		intel_old_crtc_state_disables(state,
14303 					      old_slave_crtc_state,
14304 					      new_slave_crtc_state,
14305 					      slave_crtc);
14306 
14307 	/* Disable Master */
14308 	intel_pre_plane_update(old_crtc_state, new_crtc_state);
14309 	if (old_crtc_state->base.active)
14310 		intel_old_crtc_state_disables(state,
14311 					      old_crtc_state,
14312 					      new_crtc_state,
14313 					      crtc);
14314 }
14315 
14316 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
14317 {
14318 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
14319 	struct intel_crtc *crtc;
14320 	int i;
14321 
	/*
	 * Disable CRTCs/pipes in reverse order because some features (MST in
	 * TGL+) require a master/slave relationship between pipes: the lowest
	 * pipe is always picked as the master and is enabled first, so walking
	 * in reverse order ensures the master is the last one to be disabled.
	 */
14329 	for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
14330 						    new_crtc_state, i) {
14331 		if (!needs_modeset(new_crtc_state))
14332 			continue;
14333 
		/* In case of Transcoder port Sync, master/slave CRTCs can be
		 * assigned in any order, so make sure that slave CRTCs are
		 * disabled first and the master CRTC last, since slave
		 * vblanks are masked until the master's vblank is enabled.
		 */
14339 		if (is_trans_port_sync_mode(new_crtc_state)) {
14340 			if (is_trans_port_sync_master(new_crtc_state))
14341 				intel_trans_port_sync_modeset_disables(state,
14342 								       crtc,
14343 								       old_crtc_state,
14344 								       new_crtc_state);
14345 			else
14346 				continue;
14347 		} else {
14348 			intel_pre_plane_update(old_crtc_state, new_crtc_state);
14349 
14350 			if (old_crtc_state->base.active)
14351 				intel_old_crtc_state_disables(state,
14352 							      old_crtc_state,
14353 							      new_crtc_state,
14354 							      crtc);
14355 		}
14356 	}
14357 }
14358 
14359 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
14360 {
14361 	struct intel_crtc *crtc;
14362 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14363 	int i;
14364 
14365 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14366 		if (!new_crtc_state->base.active)
14367 			continue;
14368 
14369 		intel_update_crtc(crtc, state, old_crtc_state,
14370 				  new_crtc_state);
14371 	}
14372 }
14373 
14374 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
14375 					      struct intel_atomic_state *state,
14376 					      struct intel_crtc_state *new_crtc_state)
14377 {
14378 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14379 
14380 	intel_crtc_update_active_timings(new_crtc_state);
14381 	dev_priv->display.crtc_enable(new_crtc_state, state);
14382 	intel_crtc_enable_pipe_crc(crtc);
14383 }
14384 
14385 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
14386 				       struct intel_atomic_state *state)
14387 {
14388 	struct drm_connector *uninitialized_var(conn);
14389 	struct drm_connector_state *conn_state;
14390 	struct intel_dp *intel_dp;
14391 	int i;
14392 
14393 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
14394 		if (conn_state->crtc == &crtc->base)
14395 			break;
14396 	}
14397 	intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
14398 	intel_dp_stop_link_train(intel_dp);
14399 }
14400 
14401 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
14402 					   struct intel_atomic_state *state)
14403 {
14404 	struct intel_crtc_state *new_crtc_state =
14405 		intel_atomic_get_new_crtc_state(state, crtc);
14406 	struct intel_crtc_state *old_crtc_state =
14407 		intel_atomic_get_old_crtc_state(state, crtc);
14408 	struct intel_plane_state *new_plane_state =
14409 		intel_atomic_get_new_plane_state(state,
14410 						 to_intel_plane(crtc->base.primary));
14411 	bool modeset = needs_modeset(new_crtc_state);
14412 
14413 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
14414 		intel_fbc_disable(crtc);
14415 	else if (new_plane_state)
14416 		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
14417 
14418 	/* Perform vblank evasion around commit operation */
14419 	intel_pipe_update_start(new_crtc_state);
14420 	commit_pipe_config(state, old_crtc_state, new_crtc_state);
14421 	skl_update_planes_on_crtc(state, crtc);
14422 	intel_pipe_update_end(new_crtc_state);
14423 
14424 	/*
14425 	 * We usually enable FIFO underrun interrupts as part of the
14426 	 * CRTC enable sequence during modesets.  But when we inherit a
14427 	 * valid pipe configuration from the BIOS we need to take care
14428 	 * of enabling them on the CRTC's first fastset.
14429 	 */
14430 	if (new_crtc_state->update_pipe && !modeset &&
14431 	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14432 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14433 }
14434 
14435 static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
14436 					       struct intel_atomic_state *state,
14437 					       struct intel_crtc_state *old_crtc_state,
14438 					       struct intel_crtc_state *new_crtc_state)
14439 {
14440 	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
14441 	struct intel_crtc_state *new_slave_crtc_state =
14442 		intel_atomic_get_new_crtc_state(state, slave_crtc);
14443 	struct intel_crtc_state *old_slave_crtc_state =
14444 		intel_atomic_get_old_crtc_state(state, slave_crtc);
14445 
14446 	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
14447 		!old_slave_crtc_state);
14448 
14449 	DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
14450 		      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
14451 		      slave_crtc->base.name);
14452 
	/* Enable sequence for the slave, with DP_TP_CTL left Idle until
	 * the master is ready.
	 */
14456 	intel_crtc_enable_trans_port_sync(slave_crtc,
14457 					  state,
14458 					  new_slave_crtc_state);
14459 
	/* Enable sequence for the master, with DP_TP_CTL left Idle */
14461 	intel_crtc_enable_trans_port_sync(crtc,
14462 					  state,
14463 					  new_crtc_state);
14464 
14465 	/* Set Slave's DP_TP_CTL to Normal */
14466 	intel_set_dp_tp_ctl_normal(slave_crtc,
14467 				   state);
14468 
	/* Set Master's DP_TP_CTL to Normal */
14470 	usleep_range(200, 400);
14471 	intel_set_dp_tp_ctl_normal(crtc,
14472 				   state);
14473 
	/* Now do the post crtc enable updates for both master and slave */
14475 	intel_post_crtc_enable_updates(slave_crtc,
14476 				       state);
14477 	intel_post_crtc_enable_updates(crtc,
14478 				       state);
14479 }
14480 
14481 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
14482 {
14483 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14484 	struct intel_crtc *crtc;
14485 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14486 	unsigned int updated = 0;
14487 	bool progress;
14488 	int i;
14489 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
14490 	u8 required_slices = state->wm_results.ddb.enabled_slices;
14491 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
14492 
14493 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtcs that have been turned off. */
14495 		if (new_crtc_state->base.active)
14496 			entries[i] = old_crtc_state->wm.skl.ddb;
14497 
14498 	/* If 2nd DBuf slice required, enable it here */
14499 	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
14500 		icl_dbuf_slices_update(dev_priv, required_slices);
14501 
14502 	/*
14503 	 * Whenever the number of active pipes changes, we need to make sure we
14504 	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise we'll
14506 	 * cause pipe underruns and other bad stuff.
14507 	 */
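	/*
	 * Illustrative (hypothetical) case: if pipe B grows into a DDB
	 * range that pipe A is shrinking out of, B is skipped by the
	 * overlap check below until A has been updated and a vblank has
	 * passed (the vbl_wait), so the two allocations never overlap on
	 * the hardware at any instant.
	 */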
14508 	do {
14509 		progress = false;
14510 
14511 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14512 			enum pipe pipe = crtc->pipe;
14513 			bool vbl_wait = false;
14514 			bool modeset = needs_modeset(new_crtc_state);
14515 
14516 			if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active)
14517 				continue;
14518 
14519 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
14520 							entries,
14521 							INTEL_NUM_PIPES(dev_priv), i))
14522 				continue;
14523 
14524 			updated |= BIT(pipe);
14525 			entries[i] = new_crtc_state->wm.skl.ddb;
14526 
14527 			/*
			 * If this is an already active pipe, its DDB changed,
14529 			 * and this isn't the last pipe that needs updating
14530 			 * then we need to wait for a vblank to pass for the
14531 			 * new ddb allocation to take effect.
14532 			 */
14533 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
14534 						 &old_crtc_state->wm.skl.ddb) &&
14535 			    !modeset &&
14536 			    state->wm_results.dirty_pipes != updated)
14537 				vbl_wait = true;
14538 
14539 			if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
14540 				if (is_trans_port_sync_master(new_crtc_state))
14541 					intel_update_trans_port_sync_crtcs(crtc,
14542 									   state,
14543 									   old_crtc_state,
14544 									   new_crtc_state);
14545 				else
14546 					continue;
14547 			} else {
14548 				intel_update_crtc(crtc, state, old_crtc_state,
14549 						  new_crtc_state);
14550 			}
14551 
14552 			if (vbl_wait)
14553 				intel_wait_for_vblank(dev_priv, pipe);
14554 
14555 			progress = true;
14556 		}
14557 	} while (progress);
14558 
14559 	/* If 2nd DBuf slice is no more required disable it */
14560 	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
14561 		icl_dbuf_slices_update(dev_priv, required_slices);
14562 }
14563 
14564 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
14565 {
14566 	struct intel_atomic_state *state, *next;
14567 	struct llist_node *freed;
14568 
14569 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
14570 	llist_for_each_entry_safe(state, next, freed, freed)
14571 		drm_atomic_state_put(&state->base);
14572 }
14573 
14574 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
14575 {
14576 	struct drm_i915_private *dev_priv =
14577 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
14578 
14579 	intel_atomic_helper_free_state(dev_priv);
14580 }
14581 
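/*
 * Wait for the commit fence to signal, but also wake up if a GPU reset
 * requiring a modeset is flagged, so the commit doesn't block a reset
 * that needs to take over the display. Hence the open-coded double wait
 * on both the fence waitqueue and the reset bit below.
 */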
14582 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
14583 {
14584 	struct wait_queue_entry wait_fence, wait_reset;
14585 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
14586 
14587 	init_wait_entry(&wait_fence, 0);
14588 	init_wait_entry(&wait_reset, 0);
14589 	for (;;) {
14590 		prepare_to_wait(&intel_state->commit_ready.wait,
14591 				&wait_fence, TASK_UNINTERRUPTIBLE);
14592 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
14593 					      I915_RESET_MODESET),
14594 				&wait_reset, TASK_UNINTERRUPTIBLE);
14595 
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
14598 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
14599 			break;
14600 
14601 		schedule();
14602 	}
14603 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
14604 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
14605 				  I915_RESET_MODESET),
14606 		    &wait_reset);
14607 }
14608 
14609 static void intel_atomic_cleanup_work(struct work_struct *work)
14610 {
14611 	struct drm_atomic_state *state =
14612 		container_of(work, struct drm_atomic_state, commit_work);
14613 	struct drm_i915_private *i915 = to_i915(state->dev);
14614 
14615 	drm_atomic_helper_cleanup_planes(&i915->drm, state);
14616 	drm_atomic_helper_commit_cleanup_done(state);
14617 	drm_atomic_state_put(state);
14618 
14619 	intel_atomic_helper_free_state(i915);
14620 }
14621 
14622 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
14623 {
14624 	struct drm_device *dev = state->base.dev;
14625 	struct drm_i915_private *dev_priv = to_i915(dev);
14626 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
14627 	struct intel_crtc *crtc;
14628 	u64 put_domains[I915_MAX_PIPES] = {};
14629 	intel_wakeref_t wakeref = 0;
14630 	int i;
14631 
14632 	intel_atomic_commit_fence_wait(state);
14633 
14634 	drm_atomic_helper_wait_for_dependencies(&state->base);
14635 
14636 	if (state->modeset)
14637 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
14638 
14639 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14640 					    new_crtc_state, i) {
14641 		if (needs_modeset(new_crtc_state) ||
14642 		    new_crtc_state->update_pipe) {
14643 
14644 			put_domains[crtc->pipe] =
14645 				modeset_get_crtc_power_domains(new_crtc_state);
14646 		}
14647 	}
14648 
14649 	intel_commit_modeset_disables(state);
14650 
14651 	/* FIXME: Eventually get rid of our crtc->config pointer */
14652 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
14653 		crtc->config = new_crtc_state;
14654 
14655 	if (state->modeset) {
14656 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
14657 
14658 		intel_set_cdclk_pre_plane_update(dev_priv,
14659 						 &state->cdclk.actual,
14660 						 &dev_priv->cdclk.actual,
14661 						 state->cdclk.pipe);
14662 
14663 		/*
14664 		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
14666 		 */
14667 		if (!intel_can_enable_sagv(state))
14668 			intel_disable_sagv(dev_priv);
14669 
14670 		intel_modeset_verify_disabled(dev_priv, state);
14671 	}
14672 
14673 	/* Complete the events for pipes that have now been disabled */
14674 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14675 		bool modeset = needs_modeset(new_crtc_state);
14676 
14678 		if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
14679 			spin_lock_irq(&dev->event_lock);
14680 			drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
14681 			spin_unlock_irq(&dev->event_lock);
14682 
14683 			new_crtc_state->base.event = NULL;
14684 		}
14685 	}
14686 
14687 	if (state->modeset)
14688 		intel_encoders_update_prepare(state);
14689 
14690 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
14691 	dev_priv->display.commit_modeset_enables(state);
14692 
14693 	if (state->modeset) {
14694 		intel_encoders_update_complete(state);
14695 
14696 		intel_set_cdclk_post_plane_update(dev_priv,
14697 						  &state->cdclk.actual,
14698 						  &dev_priv->cdclk.actual,
14699 						  state->cdclk.pipe);
14700 	}
14701 
14702 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
14703 	 * already, but still need the state for the delayed optimization. To
14704 	 * fix this:
14705 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
14706 	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
14710 	 */
14711 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
14712 
14713 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14714 		if (new_crtc_state->base.active &&
14715 		    !needs_modeset(new_crtc_state) &&
14716 		    (new_crtc_state->base.color_mgmt_changed ||
14717 		     new_crtc_state->update_pipe))
14718 			intel_color_load_luts(new_crtc_state);
14719 	}
14720 
14721 	/*
14722 	 * Now that the vblank has passed, we can go ahead and program the
14723 	 * optimal watermarks on platforms that need two-step watermark
14724 	 * programming.
14725 	 *
14726 	 * TODO: Move this (and other cleanup) to an async worker eventually.
14727 	 */
14728 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14729 		if (dev_priv->display.optimize_watermarks)
14730 			dev_priv->display.optimize_watermarks(state,
14731 							      new_crtc_state);
14732 	}
14733 
14734 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
14735 		intel_post_plane_update(old_crtc_state);
14736 
14737 		if (put_domains[i])
14738 			modeset_put_power_domains(dev_priv, put_domains[i]);
14739 
14740 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
14741 	}
14742 
14743 	if (state->modeset)
14744 		intel_verify_planes(state);
14745 
14746 	if (state->modeset && intel_can_enable_sagv(state))
14747 		intel_enable_sagv(dev_priv);
14748 
14749 	drm_atomic_helper_commit_hw_done(&state->base);
14750 
14751 	if (state->modeset) {
14752 		/* As one of the primary mmio accessors, KMS has a high
14753 		 * likelihood of triggering bugs in unclaimed access. After we
14754 		 * finish modesetting, see if an error has been flagged, and if
14755 		 * so enable debugging for the next modeset - and hope we catch
14756 		 * the culprit.
14757 		 */
14758 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
14759 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
14760 	}
14761 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14762 
14763 	/*
14764 	 * Defer the cleanup of the old state to a separate worker to not
14765 	 * impede the current task (userspace for blocking modesets) that
	 * is executed inline. For out-of-line asynchronous modesets/flips,
14767 	 * deferring to a new worker seems overkill, but we would place a
14768 	 * schedule point (cond_resched()) here anyway to keep latencies
14769 	 * down.
14770 	 */
14771 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
14772 	queue_work(system_highpri_wq, &state->base.commit_work);
14773 }
14774 
14775 static void intel_atomic_commit_work(struct work_struct *work)
14776 {
14777 	struct intel_atomic_state *state =
14778 		container_of(work, struct intel_atomic_state, base.commit_work);
14779 
14780 	intel_atomic_commit_tail(state);
14781 }
14782 
14783 static int __i915_sw_fence_call
14784 intel_atomic_commit_ready(struct i915_sw_fence *fence,
14785 			  enum i915_sw_fence_notify notify)
14786 {
14787 	struct intel_atomic_state *state =
14788 		container_of(fence, struct intel_atomic_state, commit_ready);
14789 
14790 	switch (notify) {
14791 	case FENCE_COMPLETE:
14792 		/* we do blocking waits in the worker, nothing to do here */
14793 		break;
14794 	case FENCE_FREE:
14795 		{
14796 			struct intel_atomic_helper *helper =
14797 				&to_i915(state->base.dev)->atomic_helper;
14798 
14799 			if (llist_add(&state->freed, &helper->free_list))
14800 				schedule_work(&helper->free_work);
14801 			break;
14802 		}
14803 	}
14804 
14805 	return NOTIFY_DONE;
14806 }
14807 
14808 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
14809 {
14810 	struct intel_plane_state *old_plane_state, *new_plane_state;
14811 	struct intel_plane *plane;
14812 	int i;
14813 
14814 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
14815 					     new_plane_state, i)
14816 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
14817 					to_intel_frontbuffer(new_plane_state->base.fb),
14818 					plane->frontbuffer_bit);
14819 }
14820 
14821 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
14822 {
14823 	struct intel_crtc *crtc;
14824 
14825 	for_each_intel_crtc(&dev_priv->drm, crtc)
14826 		drm_modeset_lock_assert_held(&crtc->base.mutex);
14827 }
14828 
14829 static int intel_atomic_commit(struct drm_device *dev,
14830 			       struct drm_atomic_state *_state,
14831 			       bool nonblock)
14832 {
14833 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
14834 	struct drm_i915_private *dev_priv = to_i915(dev);
14835 	int ret = 0;
14836 
14837 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
14838 
14839 	drm_atomic_state_get(&state->base);
14840 	i915_sw_fence_init(&state->commit_ready,
14841 			   intel_atomic_commit_ready);
14842 
14843 	/*
14844 	 * The intel_legacy_cursor_update() fast path takes care
14845 	 * of avoiding the vblank waits for simple cursor
14846 	 * movement and flips. For cursor on/off and size changes,
14847 	 * we want to perform the vblank waits so that watermark
14848 	 * updates happen during the correct frames. Gen9+ have
14849 	 * double buffered watermarks and so shouldn't need this.
14850 	 *
14851 	 * Unset state->legacy_cursor_update before the call to
14852 	 * drm_atomic_helper_setup_commit() because otherwise
14853 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
14854 	 * we get FIFO underruns because we didn't wait
14855 	 * for vblank.
14856 	 *
14857 	 * FIXME doing watermarks and fb cleanup from a vblank worker
14858 	 * (assuming we had any) would solve these problems.
14859 	 */
14860 	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
14861 		struct intel_crtc_state *new_crtc_state;
14862 		struct intel_crtc *crtc;
14863 		int i;
14864 
14865 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
14866 			if (new_crtc_state->wm.need_postvbl_update ||
14867 			    new_crtc_state->update_wm_post)
14868 				state->base.legacy_cursor_update = false;
14869 	}
14870 
14871 	ret = intel_atomic_prepare_commit(state);
14872 	if (ret) {
14873 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
14874 		i915_sw_fence_commit(&state->commit_ready);
14875 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14876 		return ret;
14877 	}
14878 
14879 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
14880 	if (!ret)
14881 		ret = drm_atomic_helper_swap_state(&state->base, true);
14882 
14883 	if (ret) {
14884 		i915_sw_fence_commit(&state->commit_ready);
14885 
14886 		drm_atomic_helper_cleanup_planes(dev, &state->base);
14887 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
14888 		return ret;
14889 	}
14890 	dev_priv->wm.distrust_bios_wm = false;
14891 	intel_shared_dpll_swap_state(state);
14892 	intel_atomic_track_fbs(state);
14893 
14894 	if (state->global_state_changed) {
14895 		assert_global_state_locked(dev_priv);
14896 
14897 		memcpy(dev_priv->min_cdclk, state->min_cdclk,
14898 		       sizeof(state->min_cdclk));
14899 		memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
14900 		       sizeof(state->min_voltage_level));
14901 		dev_priv->active_pipes = state->active_pipes;
14902 		dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
14903 
14904 		intel_cdclk_swap_state(state);
14905 	}
14906 
14907 	drm_atomic_state_get(&state->base);
14908 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
14909 
14910 	i915_sw_fence_commit(&state->commit_ready);
14911 	if (nonblock && state->modeset) {
14912 		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
14913 	} else if (nonblock) {
14914 		queue_work(dev_priv->flip_wq, &state->base.commit_work);
14915 	} else {
14916 		if (state->modeset)
14917 			flush_workqueue(dev_priv->modeset_wq);
14918 		intel_atomic_commit_tail(state);
14919 	}
14920 
14921 	return 0;
14922 }
14923 
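/*
 * Flip-triggered RPS boost: a wait entry is queued on the vblank
 * waitqueue and, when the vblank fires, bumps the GPU frequency for the
 * request backing the flip if it hasn't started executing yet (i.e. we
 * already missed a vblank waiting for it).
 */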
14924 struct wait_rps_boost {
14925 	struct wait_queue_entry wait;
14926 
14927 	struct drm_crtc *crtc;
14928 	struct i915_request *request;
14929 };
14930 
14931 static int do_rps_boost(struct wait_queue_entry *_wait,
14932 			unsigned mode, int sync, void *key)
14933 {
14934 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14935 	struct i915_request *rq = wait->request;
14936 
14937 	/*
14938 	 * If we missed the vblank, but the request is already running it
14939 	 * is reasonable to assume that it will complete before the next
14940 	 * vblank without our intervention, so leave RPS alone.
14941 	 */
14942 	if (!i915_request_started(rq))
14943 		intel_rps_boost(rq);
14944 	i915_request_put(rq);
14945 
14946 	drm_crtc_vblank_put(wait->crtc);
14947 
14948 	list_del(&wait->wait.entry);
14949 	kfree(wait);
14950 	return 1;
14951 }
14952 
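/*
 * Arm a one-shot vblank waiter that boosts the GPU frequency if the
 * request backing @fence hasn't started executing by the next vblank.
 * Only i915 fences on gen6+ are considered; anything else is left alone.
 */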
14953 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14954 				       struct dma_fence *fence)
14955 {
14956 	struct wait_rps_boost *wait;
14957 
14958 	if (!dma_fence_is_i915(fence))
14959 		return;
14960 
14961 	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14962 		return;
14963 
14964 	if (drm_crtc_vblank_get(crtc))
14965 		return;
14966 
14967 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14968 	if (!wait) {
14969 		drm_crtc_vblank_put(crtc);
14970 		return;
14971 	}
14972 
14973 	wait->request = to_request(dma_fence_get(fence));
14974 	wait->crtc = crtc;
14975 
14976 	wait->wait.func = do_rps_boost;
14977 	wait->wait.flags = 0;
14978 
14979 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14980 }
14981 
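/*
 * Pin (and fence, if needed) the framebuffer backing @plane_state for
 * scanout. Cursors on platforms that need physically contiguous cursor
 * memory get a physical attachment first.
 */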
14982 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14983 {
14984 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14985 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14986 	struct drm_framebuffer *fb = plane_state->base.fb;
14987 	struct i915_vma *vma;
14988 
14989 	if (plane->id == PLANE_CURSOR &&
14990 	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
14991 		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14992 		const int align = intel_cursor_alignment(dev_priv);
14993 		int err;
14994 
14995 		err = i915_gem_object_attach_phys(obj, align);
14996 		if (err)
14997 			return err;
14998 	}
14999 
15000 	vma = intel_pin_and_fence_fb_obj(fb,
15001 					 &plane_state->view,
15002 					 intel_plane_uses_fence(plane_state),
15003 					 &plane_state->flags);
15004 	if (IS_ERR(vma))
15005 		return PTR_ERR(vma);
15006 
15007 	plane_state->vma = vma;
15008 
15009 	return 0;
15010 }
15011 
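/* Release the scanout pin taken by intel_plane_pin_fb(). */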
15012 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15013 {
15014 	struct i915_vma *vma;
15015 
15016 	vma = fetch_and_zero(&old_plane_state->vma);
15017 	if (vma)
15018 		intel_unpin_fb_vma(vma, old_plane_state->flags);
15019 }
15020 
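/*
 * Bump the GEM object's scheduling priority so that rendering to a
 * visible framebuffer is less likely to miss the flip deadline.
 */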
15021 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
15022 {
15023 	struct i915_sched_attr attr = {
15024 		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
15025 	};
15026 
15027 	i915_gem_object_wait_priority(obj, 0, &attr);
15028 }
15029 
15030 /**
15031  * intel_prepare_plane_fb - Prepare fb for usage on plane
15032  * @plane: drm plane to prepare for
15033  * @_new_plane_state: the plane state being prepared
15034  *
15035  * Prepares a framebuffer for usage on a display plane.  Generally this
15036  * involves pinning the underlying object and updating the frontbuffer tracking
15037  * bits.  Some older platforms need special physical address handling for
15038  * cursor planes.
15039  *
15040  * Returns 0 on success, negative error code on failure.
15041  */
15042 int
15043 intel_prepare_plane_fb(struct drm_plane *plane,
15044 		       struct drm_plane_state *_new_plane_state)
15045 {
15046 	struct intel_plane_state *new_plane_state =
15047 		to_intel_plane_state(_new_plane_state);
15048 	struct intel_atomic_state *intel_state =
15049 		to_intel_atomic_state(new_plane_state->base.state);
15050 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
15051 	struct drm_framebuffer *fb = new_plane_state->base.fb;
15052 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15053 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
15054 	int ret;
15055 
15056 	if (old_obj) {
15057 		struct intel_crtc_state *crtc_state =
15058 			intel_atomic_get_new_crtc_state(intel_state,
15059 							to_intel_crtc(plane->state->crtc));
15060 
		/* Big Hammer: we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
15068 		 *
15069 		 * This should only fail upon a hung GPU, in which case we
15070 		 * can safely continue.
15071 		 */
15072 		if (needs_modeset(crtc_state)) {
15073 			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15074 							      old_obj->base.resv, NULL,
15075 							      false, 0,
15076 							      GFP_KERNEL);
15077 			if (ret < 0)
15078 				return ret;
15079 		}
15080 	}
15081 
15082 	if (new_plane_state->base.fence) { /* explicit fencing */
15083 		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
15084 						    new_plane_state->base.fence,
15085 						    I915_FENCE_TIMEOUT,
15086 						    GFP_KERNEL);
15087 		if (ret < 0)
15088 			return ret;
15089 	}
15090 
15091 	if (!obj)
15092 		return 0;
15093 
15094 	ret = i915_gem_object_pin_pages(obj);
15095 	if (ret)
15096 		return ret;
15097 
15098 	ret = intel_plane_pin_fb(new_plane_state);
15099 
15100 	i915_gem_object_unpin_pages(obj);
15101 	if (ret)
15102 		return ret;
15103 
15104 	fb_obj_bump_render_priority(obj);
15105 	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
15106 
15107 	if (!new_plane_state->base.fence) { /* implicit fencing */
15108 		struct dma_fence *fence;
15109 
15110 		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
15111 						      obj->base.resv, NULL,
15112 						      false, I915_FENCE_TIMEOUT,
15113 						      GFP_KERNEL);
15114 		if (ret < 0)
15115 			return ret;
15116 
15117 		fence = dma_resv_get_excl_rcu(obj->base.resv);
15118 		if (fence) {
15119 			add_rps_boost_after_vblank(new_plane_state->base.crtc,
15120 						   fence);
15121 			dma_fence_put(fence);
15122 		}
15123 	} else {
15124 		add_rps_boost_after_vblank(new_plane_state->base.crtc,
15125 					   new_plane_state->base.fence);
15126 	}
15127 
	/*
	 * We declare pageflips to be interactive and so they merit a small
	 * bias towards upclocking to deliver the frame on time. By only
	 * changing the RPS thresholds to sample more regularly and aim for
	 * higher clocks, we can hopefully deliver low-power workloads (like
	 * kodi) that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
15136 	if (!intel_state->rps_interactive) {
15137 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
15138 		intel_state->rps_interactive = true;
15139 	}
15140 
15141 	return 0;
15142 }
15143 
15144 /**
15145  * intel_cleanup_plane_fb - Cleans up an fb after plane use
15146  * @plane: drm plane to clean up for
15147  * @_old_plane_state: the state from the previous modeset
15148  *
15149  * Cleans up a framebuffer that has just been removed from a plane.
15150  */
15151 void
15152 intel_cleanup_plane_fb(struct drm_plane *plane,
15153 		       struct drm_plane_state *_old_plane_state)
15154 {
15155 	struct intel_plane_state *old_plane_state =
15156 		to_intel_plane_state(_old_plane_state);
15157 	struct intel_atomic_state *intel_state =
15158 		to_intel_atomic_state(old_plane_state->base.state);
15159 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
15160 
15161 	if (intel_state->rps_interactive) {
15162 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
15163 		intel_state->rps_interactive = false;
15164 	}
15165 
15166 	/* Should only be called after a successful intel_prepare_plane_fb()! */
15167 	intel_plane_unpin_fb(old_plane_state);
15168 }
15169 
15170 /**
15171  * intel_plane_destroy - destroy a plane
15172  * @plane: plane to destroy
15173  *
15174  * Common destruction function for all types of planes (primary, cursor,
15175  * sprite).
15176  */
15177 void intel_plane_destroy(struct drm_plane *plane)
15178 {
15179 	drm_plane_cleanup(plane);
15180 	kfree(to_intel_plane(plane));
15181 }
15182 
15183 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
15184 					    u32 format, u64 modifier)
15185 {
15186 	switch (modifier) {
15187 	case DRM_FORMAT_MOD_LINEAR:
15188 	case I915_FORMAT_MOD_X_TILED:
15189 		break;
15190 	default:
15191 		return false;
15192 	}
15193 
15194 	switch (format) {
15195 	case DRM_FORMAT_C8:
15196 	case DRM_FORMAT_RGB565:
15197 	case DRM_FORMAT_XRGB1555:
15198 	case DRM_FORMAT_XRGB8888:
15199 		return modifier == DRM_FORMAT_MOD_LINEAR ||
15200 			modifier == I915_FORMAT_MOD_X_TILED;
15201 	default:
15202 		return false;
15203 	}
15204 }
15205 
15206 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
15207 					    u32 format, u64 modifier)
15208 {
15209 	switch (modifier) {
15210 	case DRM_FORMAT_MOD_LINEAR:
15211 	case I915_FORMAT_MOD_X_TILED:
15212 		break;
15213 	default:
15214 		return false;
15215 	}
15216 
15217 	switch (format) {
15218 	case DRM_FORMAT_C8:
15219 	case DRM_FORMAT_RGB565:
15220 	case DRM_FORMAT_XRGB8888:
15221 	case DRM_FORMAT_XBGR8888:
15222 	case DRM_FORMAT_XRGB2101010:
15223 	case DRM_FORMAT_XBGR2101010:
15224 	case DRM_FORMAT_XBGR16161616F:
15225 		return modifier == DRM_FORMAT_MOD_LINEAR ||
15226 			modifier == I915_FORMAT_MOD_X_TILED;
15227 	default:
15228 		return false;
15229 	}
15230 }
15231 
15232 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
15233 					      u32 format, u64 modifier)
15234 {
15235 	return modifier == DRM_FORMAT_MOD_LINEAR &&
15236 		format == DRM_FORMAT_ARGB8888;
15237 }
15238 
15239 static const struct drm_plane_funcs i965_plane_funcs = {
15240 	.update_plane = drm_atomic_helper_update_plane,
15241 	.disable_plane = drm_atomic_helper_disable_plane,
15242 	.destroy = intel_plane_destroy,
15243 	.atomic_duplicate_state = intel_plane_duplicate_state,
15244 	.atomic_destroy_state = intel_plane_destroy_state,
15245 	.format_mod_supported = i965_plane_format_mod_supported,
15246 };
15247 
15248 static const struct drm_plane_funcs i8xx_plane_funcs = {
15249 	.update_plane = drm_atomic_helper_update_plane,
15250 	.disable_plane = drm_atomic_helper_disable_plane,
15251 	.destroy = intel_plane_destroy,
15252 	.atomic_duplicate_state = intel_plane_duplicate_state,
15253 	.atomic_destroy_state = intel_plane_destroy_state,
15254 	.format_mod_supported = i8xx_plane_format_mod_supported,
15255 };
15256 
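/*
 * Fastpath for legacy cursor ioctls: update the cursor plane directly,
 * bypassing the full atomic commit machinery, when only the fb or the
 * position changes. Anything that could affect watermarks or race with
 * a pending commit takes the drm_atomic_helper_update_plane() slowpath.
 */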
15257 static int
15258 intel_legacy_cursor_update(struct drm_plane *_plane,
15259 			   struct drm_crtc *_crtc,
15260 			   struct drm_framebuffer *fb,
15261 			   int crtc_x, int crtc_y,
15262 			   unsigned int crtc_w, unsigned int crtc_h,
15263 			   u32 src_x, u32 src_y,
15264 			   u32 src_w, u32 src_h,
15265 			   struct drm_modeset_acquire_ctx *ctx)
15266 {
15267 	struct intel_plane *plane = to_intel_plane(_plane);
15268 	struct intel_crtc *crtc = to_intel_crtc(_crtc);
15269 	struct intel_plane_state *old_plane_state =
15270 		to_intel_plane_state(plane->base.state);
15271 	struct intel_plane_state *new_plane_state;
15272 	struct intel_crtc_state *crtc_state =
15273 		to_intel_crtc_state(crtc->base.state);
15274 	struct intel_crtc_state *new_crtc_state;
15275 	int ret;
15276 
	/*
	 * When the CRTC is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath.
	 */
15281 	if (!crtc_state->base.active || needs_modeset(crtc_state) ||
15282 	    crtc_state->update_pipe)
15283 		goto slow;
15284 
15285 	/*
15286 	 * Don't do an async update if there is an outstanding commit modifying
15287 	 * the plane.  This prevents our async update's changes from getting
15288 	 * overridden by a previous synchronous update's state.
15289 	 */
15290 	if (old_plane_state->base.commit &&
15291 	    !try_wait_for_completion(&old_plane_state->base.commit->hw_done))
15292 		goto slow;
15293 
	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only fb or position changes should
	 * go through the fastpath.
	 */
15299 	if (old_plane_state->base.crtc != &crtc->base ||
15300 	    old_plane_state->base.src_w != src_w ||
15301 	    old_plane_state->base.src_h != src_h ||
15302 	    old_plane_state->base.crtc_w != crtc_w ||
15303 	    old_plane_state->base.crtc_h != crtc_h ||
15304 	    !old_plane_state->base.fb != !fb)
15305 		goto slow;
15306 
15307 	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
15308 	if (!new_plane_state)
15309 		return -ENOMEM;
15310 
15311 	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
15312 	if (!new_crtc_state) {
15313 		ret = -ENOMEM;
15314 		goto out_free;
15315 	}
15316 
15317 	drm_atomic_set_fb_for_plane(&new_plane_state->base, fb);
15318 
15319 	new_plane_state->base.src_x = src_x;
15320 	new_plane_state->base.src_y = src_y;
15321 	new_plane_state->base.src_w = src_w;
15322 	new_plane_state->base.src_h = src_h;
15323 	new_plane_state->base.crtc_x = crtc_x;
15324 	new_plane_state->base.crtc_y = crtc_y;
15325 	new_plane_state->base.crtc_w = crtc_w;
15326 	new_plane_state->base.crtc_h = crtc_h;
15327 
15328 	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
15329 						  old_plane_state, new_plane_state);
15330 	if (ret)
15331 		goto out_free;
15332 
15333 	ret = intel_plane_pin_fb(new_plane_state);
15334 	if (ret)
15335 		goto out_free;
15336 
15337 	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP);
15338 	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
15339 				to_intel_frontbuffer(new_plane_state->base.fb),
15340 				plane->frontbuffer_bit);
15341 
15342 	/* Swap plane state */
15343 	plane->base.state = &new_plane_state->base;
15344 
15345 	/*
15346 	 * We cannot swap crtc_state as it may be in use by an atomic commit or
15347 	 * page flip that's running simultaneously. If we swap crtc_state and
15348 	 * destroy the old state, we will cause a use-after-free there.
15349 	 *
15350 	 * Only update active_planes, which is needed for our internal
15351 	 * bookkeeping. Either value will do the right thing when updating
15352 	 * planes atomically. If the cursor was part of the atomic update then
15353 	 * we would have taken the slowpath.
15354 	 */
15355 	crtc_state->active_planes = new_crtc_state->active_planes;
15356 
15357 	if (new_plane_state->base.visible)
15358 		intel_update_plane(plane, crtc_state, new_plane_state);
15359 	else
15360 		intel_disable_plane(plane, crtc_state);
15361 
15362 	intel_plane_unpin_fb(old_plane_state);
15363 
15364 out_free:
15365 	if (new_crtc_state)
15366 		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base);
15367 	if (ret)
15368 		intel_plane_destroy_state(&plane->base, &new_plane_state->base);
15369 	else
15370 		intel_plane_destroy_state(&plane->base, &old_plane_state->base);
15371 	return ret;
15372 
15373 slow:
15374 	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
15375 					      crtc_x, crtc_y, crtc_w, crtc_h,
15376 					      src_x, src_y, src_w, src_h, ctx);
15377 }
15378 
15379 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
15380 	.update_plane = intel_legacy_cursor_update,
15381 	.disable_plane = drm_atomic_helper_disable_plane,
15382 	.destroy = intel_plane_destroy,
15383 	.atomic_duplicate_state = intel_plane_duplicate_state,
15384 	.atomic_destroy_state = intel_plane_destroy_state,
15385 	.format_mod_supported = intel_cursor_format_mod_supported,
15386 };
15387 
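/* Which pre-skl primary planes can be compressed by FBC. */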
15388 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
15389 			       enum i9xx_plane_id i9xx_plane)
15390 {
15391 	if (!HAS_FBC(dev_priv))
15392 		return false;
15393 
15394 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15395 		return i9xx_plane == PLANE_A; /* tied to pipe A */
15396 	else if (IS_IVYBRIDGE(dev_priv))
15397 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
15398 			i9xx_plane == PLANE_C;
15399 	else if (INTEL_GEN(dev_priv) >= 4)
15400 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
15401 	else
15402 		return i9xx_plane == PLANE_A;
15403 }
15404 
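/*
 * Create the primary plane for @pipe, selecting the format list,
 * modifiers and vfuncs appropriate for the platform generation.
 */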
15405 static struct intel_plane *
15406 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
15407 {
15408 	struct intel_plane *plane;
15409 	const struct drm_plane_funcs *plane_funcs;
15410 	unsigned int supported_rotations;
15411 	unsigned int possible_crtcs;
15412 	const u64 *modifiers;
15413 	const u32 *formats;
15414 	int num_formats;
15415 	int ret, zpos;
15416 
15417 	if (INTEL_GEN(dev_priv) >= 9)
15418 		return skl_universal_plane_create(dev_priv, pipe,
15419 						  PLANE_PRIMARY);
15420 
15421 	plane = intel_plane_alloc();
15422 	if (IS_ERR(plane))
15423 		return plane;
15424 
15425 	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
15430 	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
15431 		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
15432 	else
15433 		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
15434 	plane->id = PLANE_PRIMARY;
15435 	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
15436 
15437 	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
15438 	if (plane->has_fbc) {
15439 		struct intel_fbc *fbc = &dev_priv->fbc;
15440 
15441 		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
15442 	}
15443 
15444 	if (INTEL_GEN(dev_priv) >= 4) {
15445 		/*
15446 		 * WaFP16GammaEnabling:ivb
15447 		 * "Workaround : When using the 64-bit format, the plane
15448 		 *  output on each color channel has one quarter amplitude.
15449 		 *  It can be brought up to full amplitude by using pipe
15450 		 *  gamma correction or pipe color space conversion to
15451 		 *  multiply the plane output by four."
15452 		 *
15453 		 * There is no dedicated plane gamma for the primary plane,
15454 		 * and using the pipe gamma/csc could conflict with other
15455 		 * planes, so we choose not to expose fp16 on IVB primary
15456 		 * planes. HSW primary planes no longer have this problem.
15457 		 */
15458 		if (IS_IVYBRIDGE(dev_priv)) {
15459 			formats = ivb_primary_formats;
15460 			num_formats = ARRAY_SIZE(ivb_primary_formats);
15461 		} else {
15462 			formats = i965_primary_formats;
15463 			num_formats = ARRAY_SIZE(i965_primary_formats);
15464 		}
15465 		modifiers = i9xx_format_modifiers;
15466 
15467 		plane->max_stride = i9xx_plane_max_stride;
15468 		plane->update_plane = i9xx_update_plane;
15469 		plane->disable_plane = i9xx_disable_plane;
15470 		plane->get_hw_state = i9xx_plane_get_hw_state;
15471 		plane->check_plane = i9xx_plane_check;
15472 
15473 		if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
15474 			plane->min_cdclk = hsw_plane_min_cdclk;
15475 		else if (IS_IVYBRIDGE(dev_priv))
15476 			plane->min_cdclk = ivb_plane_min_cdclk;
15477 		else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
15478 			plane->min_cdclk = vlv_plane_min_cdclk;
15479 		else
15480 			plane->min_cdclk = i9xx_plane_min_cdclk;
15481 
15482 		plane_funcs = &i965_plane_funcs;
15483 	} else {
15484 		formats = i8xx_primary_formats;
15485 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
15486 		modifiers = i9xx_format_modifiers;
15487 
15488 		plane->max_stride = i9xx_plane_max_stride;
15489 		plane->update_plane = i9xx_update_plane;
15490 		plane->disable_plane = i9xx_disable_plane;
15491 		plane->get_hw_state = i9xx_plane_get_hw_state;
15492 		plane->check_plane = i9xx_plane_check;
15493 		plane->min_cdclk = i9xx_plane_min_cdclk;
15494 
15495 		plane_funcs = &i8xx_plane_funcs;
15496 	}
15497 
15498 	possible_crtcs = BIT(pipe);
15499 
15500 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
15501 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
15502 					       possible_crtcs, plane_funcs,
15503 					       formats, num_formats, modifiers,
15504 					       DRM_PLANE_TYPE_PRIMARY,
15505 					       "primary %c", pipe_name(pipe));
15506 	else
15507 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
15508 					       possible_crtcs, plane_funcs,
15509 					       formats, num_formats, modifiers,
15510 					       DRM_PLANE_TYPE_PRIMARY,
15511 					       "plane %c",
15512 					       plane_name(plane->i9xx_plane));
15513 	if (ret)
15514 		goto fail;
15515 
15516 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
15517 		supported_rotations =
15518 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
15519 			DRM_MODE_REFLECT_X;
15520 	} else if (INTEL_GEN(dev_priv) >= 4) {
15521 		supported_rotations =
15522 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
15523 	} else {
15524 		supported_rotations = DRM_MODE_ROTATE_0;
15525 	}
15526 
15527 	if (INTEL_GEN(dev_priv) >= 4)
15528 		drm_plane_create_rotation_property(&plane->base,
15529 						   DRM_MODE_ROTATE_0,
15530 						   supported_rotations);
15531 
15532 	zpos = 0;
15533 	drm_plane_create_zpos_immutable_property(&plane->base, zpos);
15534 
15535 	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
15536 
15537 	return plane;
15538 
15539 fail:
15540 	intel_plane_free(plane);
15541 
15542 	return ERR_PTR(ret);
15543 }
15544 
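/* Create the cursor plane for @pipe with the platform's cursor vfuncs. */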
15545 static struct intel_plane *
15546 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
15547 			  enum pipe pipe)
15548 {
15549 	unsigned int possible_crtcs;
15550 	struct intel_plane *cursor;
15551 	int ret, zpos;
15552 
15553 	cursor = intel_plane_alloc();
15554 	if (IS_ERR(cursor))
15555 		return cursor;
15556 
15557 	cursor->pipe = pipe;
15558 	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
15559 	cursor->id = PLANE_CURSOR;
15560 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
15561 
15562 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15563 		cursor->max_stride = i845_cursor_max_stride;
15564 		cursor->update_plane = i845_update_cursor;
15565 		cursor->disable_plane = i845_disable_cursor;
15566 		cursor->get_hw_state = i845_cursor_get_hw_state;
15567 		cursor->check_plane = i845_check_cursor;
15568 	} else {
15569 		cursor->max_stride = i9xx_cursor_max_stride;
15570 		cursor->update_plane = i9xx_update_cursor;
15571 		cursor->disable_plane = i9xx_disable_cursor;
15572 		cursor->get_hw_state = i9xx_cursor_get_hw_state;
15573 		cursor->check_plane = i9xx_check_cursor;
15574 	}
15575 
15576 	cursor->cursor.base = ~0;
15577 	cursor->cursor.cntl = ~0;
15578 
15579 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
15580 		cursor->cursor.size = ~0;
15581 
15582 	possible_crtcs = BIT(pipe);
15583 
15584 	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
15585 				       possible_crtcs, &intel_cursor_plane_funcs,
15586 				       intel_cursor_formats,
15587 				       ARRAY_SIZE(intel_cursor_formats),
15588 				       cursor_format_modifiers,
15589 				       DRM_PLANE_TYPE_CURSOR,
15590 				       "cursor %c", pipe_name(pipe));
15591 	if (ret)
15592 		goto fail;
15593 
15594 	if (INTEL_GEN(dev_priv) >= 4)
15595 		drm_plane_create_rotation_property(&cursor->base,
15596 						   DRM_MODE_ROTATE_0,
15597 						   DRM_MODE_ROTATE_0 |
15598 						   DRM_MODE_ROTATE_180);
15599 
15600 	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
15601 	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
15602 
15603 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
15604 
15605 	return cursor;
15606 
15607 fail:
15608 	intel_plane_free(cursor);
15609 
15610 	return ERR_PTR(ret);
15611 }
15612 
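/* Mark all of the pipe's shared scalers as unused. */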
15613 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
15614 				    struct intel_crtc_state *crtc_state)
15615 {
15616 	struct intel_crtc_scaler_state *scaler_state =
15617 		&crtc_state->scaler_state;
15618 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15619 	int i;
15620 
15621 	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
15622 	if (!crtc->num_scalers)
15623 		return;
15624 
15625 	for (i = 0; i < crtc->num_scalers; i++) {
15626 		struct intel_scaler *scaler = &scaler_state->scalers[i];
15627 
15628 		scaler->in_use = 0;
15629 		scaler->mode = 0;
15630 	}
15631 
15632 	scaler_state->scaler_id = -1;
15633 }
15634 
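/*
 * CRTC vfuncs common to all platforms; the vblank counter and
 * enable/disable hooks vary per platform and are filled in below.
 */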
15635 #define INTEL_CRTC_FUNCS \
15636 	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
15637 	.set_config = drm_atomic_helper_set_config, \
15638 	.destroy = intel_crtc_destroy, \
15639 	.page_flip = drm_atomic_helper_page_flip, \
15640 	.atomic_duplicate_state = intel_crtc_duplicate_state, \
15641 	.atomic_destroy_state = intel_crtc_destroy_state, \
15642 	.set_crc_source = intel_crtc_set_crc_source, \
15643 	.verify_crc_source = intel_crtc_verify_crc_source, \
15644 	.get_crc_sources = intel_crtc_get_crc_sources
15645 
15646 static const struct drm_crtc_funcs bdw_crtc_funcs = {
15647 	INTEL_CRTC_FUNCS,
15648 
15649 	.get_vblank_counter = g4x_get_vblank_counter,
15650 	.enable_vblank = bdw_enable_vblank,
15651 	.disable_vblank = bdw_disable_vblank,
15652 };
15653 
15654 static const struct drm_crtc_funcs ilk_crtc_funcs = {
15655 	INTEL_CRTC_FUNCS,
15656 
15657 	.get_vblank_counter = g4x_get_vblank_counter,
15658 	.enable_vblank = ilk_enable_vblank,
15659 	.disable_vblank = ilk_disable_vblank,
15660 };
15661 
15662 static const struct drm_crtc_funcs g4x_crtc_funcs = {
15663 	INTEL_CRTC_FUNCS,
15664 
15665 	.get_vblank_counter = g4x_get_vblank_counter,
15666 	.enable_vblank = i965_enable_vblank,
15667 	.disable_vblank = i965_disable_vblank,
15668 };
15669 
15670 static const struct drm_crtc_funcs i965_crtc_funcs = {
15671 	INTEL_CRTC_FUNCS,
15672 
15673 	.get_vblank_counter = i915_get_vblank_counter,
15674 	.enable_vblank = i965_enable_vblank,
15675 	.disable_vblank = i965_disable_vblank,
15676 };
15677 
15678 static const struct drm_crtc_funcs i915gm_crtc_funcs = {
15679 	INTEL_CRTC_FUNCS,
15680 
15681 	.get_vblank_counter = i915_get_vblank_counter,
15682 	.enable_vblank = i915gm_enable_vblank,
15683 	.disable_vblank = i915gm_disable_vblank,
15684 };
15685 
15686 static const struct drm_crtc_funcs i915_crtc_funcs = {
15687 	INTEL_CRTC_FUNCS,
15688 
15689 	.get_vblank_counter = i915_get_vblank_counter,
15690 	.enable_vblank = i8xx_enable_vblank,
15691 	.disable_vblank = i8xx_disable_vblank,
15692 };
15693 
15694 static const struct drm_crtc_funcs i8xx_crtc_funcs = {
15695 	INTEL_CRTC_FUNCS,
15696 
15697 	/* no hw vblank counter */
15698 	.enable_vblank = i8xx_enable_vblank,
15699 	.disable_vblank = i8xx_disable_vblank,
15700 };
15701 
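/*
 * Allocate and register the CRTC for @pipe along with its primary,
 * sprite and cursor planes, and select the platform's vblank vfuncs.
 */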
15702 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
15703 {
15704 	const struct drm_crtc_funcs *funcs;
15705 	struct intel_crtc *intel_crtc;
15706 	struct intel_crtc_state *crtc_state = NULL;
15707 	struct intel_plane *primary = NULL;
15708 	struct intel_plane *cursor = NULL;
15709 	int sprite, ret;
15710 
15711 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
15712 	if (!intel_crtc)
15713 		return -ENOMEM;
15714 
15715 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
15716 	if (!crtc_state) {
15717 		ret = -ENOMEM;
15718 		goto fail;
15719 	}
15720 	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
15721 	intel_crtc->config = crtc_state;
15722 
15723 	primary = intel_primary_plane_create(dev_priv, pipe);
15724 	if (IS_ERR(primary)) {
15725 		ret = PTR_ERR(primary);
15726 		goto fail;
15727 	}
15728 	intel_crtc->plane_ids_mask |= BIT(primary->id);
15729 
15730 	for_each_sprite(dev_priv, pipe, sprite) {
15731 		struct intel_plane *plane;
15732 
15733 		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
15734 		if (IS_ERR(plane)) {
15735 			ret = PTR_ERR(plane);
15736 			goto fail;
15737 		}
15738 		intel_crtc->plane_ids_mask |= BIT(plane->id);
15739 	}
15740 
15741 	cursor = intel_cursor_plane_create(dev_priv, pipe);
15742 	if (IS_ERR(cursor)) {
15743 		ret = PTR_ERR(cursor);
15744 		goto fail;
15745 	}
15746 	intel_crtc->plane_ids_mask |= BIT(cursor->id);
15747 
15748 	if (HAS_GMCH(dev_priv)) {
15749 		if (IS_CHERRYVIEW(dev_priv) ||
15750 		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
15751 			funcs = &g4x_crtc_funcs;
15752 		else if (IS_GEN(dev_priv, 4))
15753 			funcs = &i965_crtc_funcs;
15754 		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
15755 			funcs = &i915gm_crtc_funcs;
15756 		else if (IS_GEN(dev_priv, 3))
15757 			funcs = &i915_crtc_funcs;
15758 		else
15759 			funcs = &i8xx_crtc_funcs;
15760 	} else {
15761 		if (INTEL_GEN(dev_priv) >= 8)
15762 			funcs = &bdw_crtc_funcs;
15763 		else
15764 			funcs = &ilk_crtc_funcs;
15765 	}
15766 
15767 	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
15768 					&primary->base, &cursor->base,
15769 					funcs, "pipe %c", pipe_name(pipe));
15770 	if (ret)
15771 		goto fail;
15772 
15773 	intel_crtc->pipe = pipe;
15774 
15775 	/* initialize shared scalers */
15776 	intel_crtc_init_scalers(intel_crtc, crtc_state);
15777 
15778 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
15779 	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
15780 	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
15781 
15782 	if (INTEL_GEN(dev_priv) < 9) {
15783 		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
15784 
15785 		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
15786 		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
15787 		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
15788 	}
15789 
15790 	intel_color_init(intel_crtc);
15791 
15792 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
15793 
15794 	return 0;
15795 
15796 fail:
15797 	/*
15798 	 * drm_mode_config_cleanup() will free up any
15799 	 * crtcs/planes already initialized.
15800 	 */
15801 	kfree(crtc_state);
15802 	kfree(intel_crtc);
15803 
15804 	return ret;
15805 }
15806 
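/**
 * intel_get_pipe_from_crtc_id_ioctl - map a CRTC id to its hardware pipe
 * @dev: drm device
 * @data: ioctl data (struct drm_i915_get_pipe_from_crtc_id)
 * @file: drm file
 *
 * Returns 0 on success, -ENOENT if the CRTC id doesn't exist.
 */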
15807 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15808 				      struct drm_file *file)
15809 {
15810 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15811 	struct drm_crtc *drmmode_crtc;
15812 	struct intel_crtc *crtc;
15813 
15814 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15815 	if (!drmmode_crtc)
15816 		return -ENOENT;
15817 
15818 	crtc = to_intel_crtc(drmmode_crtc);
15819 	pipe_from_crtc_id->pipe = crtc->pipe;
15820 
15821 	return 0;
15822 }
15823 
15824 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
15825 {
15826 	struct drm_device *dev = encoder->base.dev;
15827 	struct intel_encoder *source_encoder;
15828 	u32 possible_clones = 0;
15829 
15830 	for_each_intel_encoder(dev, source_encoder) {
15831 		if (encoders_cloneable(encoder, source_encoder))
15832 			possible_clones |= drm_encoder_mask(&source_encoder->base);
15833 	}
15834 
15835 	return possible_clones;
15836 }
15837 
15838 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
15839 {
15840 	struct drm_device *dev = encoder->base.dev;
15841 	struct intel_crtc *crtc;
15842 	u32 possible_crtcs = 0;
15843 
15844 	for_each_intel_crtc(dev, crtc) {
15845 		if (encoder->pipe_mask & BIT(crtc->pipe))
15846 			possible_crtcs |= drm_crtc_mask(&crtc->base);
15847 	}
15848 
15849 	return possible_crtcs;
15850 }
15851 
15852 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15853 {
15854 	if (!IS_MOBILE(dev_priv))
15855 		return false;
15856 
15857 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15858 		return false;
15859 
15860 	if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15861 		return false;
15862 
15863 	return true;
15864 }
15865 
15866 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
15867 {
15868 	if (INTEL_GEN(dev_priv) >= 9)
15869 		return false;
15870 
15871 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15872 		return false;
15873 
15874 	if (HAS_PCH_LPT_H(dev_priv) &&
15875 	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15876 		return false;
15877 
15878 	/* DDI E can't be used if DDI A requires 4 lanes */
15879 	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15880 		return false;
15881 
15882 	if (!dev_priv->vbt.int_crt_support)
15883 		return false;
15884 
15885 	return true;
15886 }
15887 
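/**
 * intel_pps_unlock_regs_wa - unlock the PPS registers
 * @dev_priv: device private
 *
 * Write the unlock key into PP_CONTROL so that subsequent panel power
 * sequencer register writes aren't dropped. A no-op on DDI platforms,
 * where the registers aren't write protected this way.
 */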
15888 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15889 {
15890 	int pps_num;
15891 	int pps_idx;
15892 
15893 	if (HAS_DDI(dev_priv))
15894 		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere registers can be write protected.
	 */
15899 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15900 		pps_num = 2;
15901 	else
15902 		pps_num = 1;
15903 
15904 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15905 		u32 val = I915_READ(PP_CONTROL(pps_idx));
15906 
15907 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15908 		I915_WRITE(PP_CONTROL(pps_idx), val);
15909 	}
15910 }
15911 
15912 static void intel_pps_init(struct drm_i915_private *dev_priv)
15913 {
15914 	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
15915 		dev_priv->pps_mmio_base = PCH_PPS_BASE;
15916 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15917 		dev_priv->pps_mmio_base = VLV_PPS_BASE;
15918 	else
15919 		dev_priv->pps_mmio_base = PPS_BASE;
15920 
15921 	intel_pps_unlock_regs_wa(dev_priv);
15922 }
15923 
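/*
 * Probe for and register all the output encoders (DDI, DP, HDMI, LVDS,
 * CRT, SDVO, DSI, TV, DVO) present on the platform, then fill in each
 * encoder's possible_crtcs/possible_clones masks.
 */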
15924 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
15925 {
15926 	struct intel_encoder *encoder;
15927 	bool dpd_is_edp = false;
15928 
15929 	intel_pps_init(dev_priv);
15930 
15931 	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
15932 		return;
15933 
15934 	if (INTEL_GEN(dev_priv) >= 12) {
15935 		intel_ddi_init(dev_priv, PORT_A);
15936 		intel_ddi_init(dev_priv, PORT_B);
15937 		intel_ddi_init(dev_priv, PORT_D);
15938 		intel_ddi_init(dev_priv, PORT_E);
15939 		intel_ddi_init(dev_priv, PORT_F);
15940 		intel_ddi_init(dev_priv, PORT_G);
15941 		intel_ddi_init(dev_priv, PORT_H);
15942 		intel_ddi_init(dev_priv, PORT_I);
15943 		icl_dsi_init(dev_priv);
15944 	} else if (IS_ELKHARTLAKE(dev_priv)) {
15945 		intel_ddi_init(dev_priv, PORT_A);
15946 		intel_ddi_init(dev_priv, PORT_B);
15947 		intel_ddi_init(dev_priv, PORT_C);
15948 		intel_ddi_init(dev_priv, PORT_D);
15949 		icl_dsi_init(dev_priv);
15950 	} else if (IS_GEN(dev_priv, 11)) {
15951 		intel_ddi_init(dev_priv, PORT_A);
15952 		intel_ddi_init(dev_priv, PORT_B);
15953 		intel_ddi_init(dev_priv, PORT_C);
15954 		intel_ddi_init(dev_priv, PORT_D);
15955 		intel_ddi_init(dev_priv, PORT_E);
15956 		/*
15957 		 * On some ICL SKUs port F is not present. No strap bits for
15958 		 * this, so rely on VBT.
15959 		 * Work around broken VBTs on SKUs known to have no port F.
15960 		 */
15961 		if (IS_ICL_WITH_PORT_F(dev_priv) &&
15962 		    intel_bios_is_port_present(dev_priv, PORT_F))
15963 			intel_ddi_init(dev_priv, PORT_F);
15964 
15965 		icl_dsi_init(dev_priv);
15966 	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers; find another way
		 * to detect the ports.
		 */
15972 		intel_ddi_init(dev_priv, PORT_A);
15973 		intel_ddi_init(dev_priv, PORT_B);
15974 		intel_ddi_init(dev_priv, PORT_C);
15975 
15976 		vlv_dsi_init(dev_priv);
15977 	} else if (HAS_DDI(dev_priv)) {
15978 		int found;
15979 
15980 		if (intel_ddi_crt_present(dev_priv))
15981 			intel_crt_init(dev_priv);
15982 
15983 		/*
15984 		 * Haswell uses DDI functions to detect digital outputs.
15985 		 * On SKL pre-D0 the strap isn't connected, so we assume
15986 		 * it's there.
15987 		 */
15988 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
15989 		/* WaIgnoreDDIAStrap: skl */
15990 		if (found || IS_GEN9_BC(dev_priv))
15991 			intel_ddi_init(dev_priv, PORT_A);
15992 
		/*
		 * DDI B, C, D, and F detection is indicated by the
		 * SFUSE_STRAP register.
		 */
15995 		found = I915_READ(SFUSE_STRAP);
15996 
15997 		if (found & SFUSE_STRAP_DDIB_DETECTED)
15998 			intel_ddi_init(dev_priv, PORT_B);
15999 		if (found & SFUSE_STRAP_DDIC_DETECTED)
16000 			intel_ddi_init(dev_priv, PORT_C);
16001 		if (found & SFUSE_STRAP_DDID_DETECTED)
16002 			intel_ddi_init(dev_priv, PORT_D);
16003 		if (found & SFUSE_STRAP_DDIF_DETECTED)
16004 			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E, so we rely on
		 * the VBT.
		 */
16008 		if (IS_GEN9_BC(dev_priv) &&
16009 		    intel_bios_is_port_present(dev_priv, PORT_E))
16010 			intel_ddi_init(dev_priv, PORT_E);
16011 
16012 	} else if (HAS_PCH_SPLIT(dev_priv)) {
16013 		int found;
16014 
16015 		/*
16016 		 * intel_edp_init_connector() depends on this completing first,
16017 		 * to prevent the registration of both eDP and LVDS and the
16018 		 * incorrect sharing of the PPS.
16019 		 */
16020 		intel_lvds_init(dev_priv);
16021 		intel_crt_init(dev_priv);
16022 
16023 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
16024 
16025 		if (ilk_has_edp_a(dev_priv))
16026 			intel_dp_init(dev_priv, DP_A, PORT_A);
16027 
16028 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
16029 			/* PCH SDVOB multiplex with HDMIB */
16030 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
16031 			if (!found)
16032 				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
16033 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
16034 				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
16035 		}
16036 
16037 		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
16038 			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
16039 
16040 		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
16041 			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
16042 
16043 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
16044 			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
16045 
16046 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
16047 			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
16048 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
16049 		bool has_edp, has_port;
16050 
16051 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
16052 			intel_crt_init(dev_priv);
16053 
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However, since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claims are DP or eDP.
		 */
16069 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
16070 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
16071 		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
16072 			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
16073 		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
16074 			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
16075 
16076 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
16077 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
16078 		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
16079 			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
16080 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
16081 			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
16082 
16083 		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP is not supported on port D,
			 * so no need to worry about it.
			 */
16088 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
16089 			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
16090 				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
16091 			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
16092 				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
16093 		}
16094 
16095 		vlv_dsi_init(dev_priv);
16096 	} else if (IS_PINEVIEW(dev_priv)) {
16097 		intel_lvds_init(dev_priv);
16098 		intel_crt_init(dev_priv);
16099 	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
16100 		bool found = false;
16101 
16102 		if (IS_MOBILE(dev_priv))
16103 			intel_lvds_init(dev_priv);
16104 
16105 		intel_crt_init(dev_priv);
16106 
16107 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
16108 			DRM_DEBUG_KMS("probing SDVOB\n");
16109 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
16110 			if (!found && IS_G4X(dev_priv)) {
16111 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
16112 				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
16113 			}
16114 
16115 			if (!found && IS_G4X(dev_priv))
16116 				intel_dp_init(dev_priv, DP_B, PORT_B);
16117 		}
16118 
		/* Before G4X, SDVOC doesn't have its own detect register */
16120 
16121 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
16122 			DRM_DEBUG_KMS("probing SDVOC\n");
16123 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
16124 		}
16125 
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
16128 			if (IS_G4X(dev_priv)) {
16129 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
16130 				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
16131 			}
16132 			if (IS_G4X(dev_priv))
16133 				intel_dp_init(dev_priv, DP_C, PORT_C);
16134 		}
16135 
16136 		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
16137 			intel_dp_init(dev_priv, DP_D, PORT_D);
16138 
16139 		if (SUPPORTS_TV(dev_priv))
16140 			intel_tv_init(dev_priv);
16141 	} else if (IS_GEN(dev_priv, 2)) {
16142 		if (IS_I85X(dev_priv))
16143 			intel_lvds_init(dev_priv);
16144 
16145 		intel_crt_init(dev_priv);
16146 		intel_dvo_init(dev_priv);
16147 	}
16148 
16149 	intel_psr_init(dev_priv);
16150 
16151 	for_each_intel_encoder(&dev_priv->drm, encoder) {
16152 		encoder->base.possible_crtcs =
16153 			intel_encoder_possible_crtcs(encoder);
16154 		encoder->base.possible_clones =
16155 			intel_encoder_possible_clones(encoder);
16156 	}
16157 
16158 	intel_init_pch_refclk(dev_priv);
16159 
16160 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
16161 }
16162 
16163 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
16164 {
16165 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
16166 
16167 	drm_framebuffer_cleanup(fb);
16168 	intel_frontbuffer_put(intel_fb->frontbuffer);
16169 
16170 	kfree(intel_fb);
16171 }
16172 
16173 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
16174 						struct drm_file *file,
16175 						unsigned int *handle)
16176 {
16177 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16178 
16179 	if (obj->userptr.mm) {
16180 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
16181 		return -EINVAL;
16182 	}
16183 
16184 	return drm_gem_handle_create(file, &obj->base, handle);
16185 }
16186 
16187 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
16188 					struct drm_file *file,
16189 					unsigned flags, unsigned color,
16190 					struct drm_clip_rect *clips,
16191 					unsigned num_clips)
16192 {
16193 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
16194 
16195 	i915_gem_object_flush_if_display(obj);
16196 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
16197 
16198 	return 0;
16199 }
16200 
16201 static const struct drm_framebuffer_funcs intel_fb_funcs = {
16202 	.destroy = intel_user_framebuffer_destroy,
16203 	.create_handle = intel_user_framebuffer_create_handle,
16204 	.dirty = intel_user_framebuffer_dirty,
16205 };
16206 
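/*
 * Validate an addfb(2) request against the platform's format, modifier,
 * stride and tiling constraints and initialize the framebuffer.
 */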
16207 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
16208 				  struct drm_i915_gem_object *obj,
16209 				  struct drm_mode_fb_cmd2 *mode_cmd)
16210 {
16211 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
16212 	struct drm_framebuffer *fb = &intel_fb->base;
16213 	u32 max_stride;
16214 	unsigned int tiling, stride;
16215 	int ret = -EINVAL;
16216 	int i;
16217 
16218 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
16219 	if (!intel_fb->frontbuffer)
16220 		return -ENOMEM;
16221 
16222 	i915_gem_object_lock(obj);
16223 	tiling = i915_gem_object_get_tiling(obj);
16224 	stride = i915_gem_object_get_stride(obj);
16225 	i915_gem_object_unlock(obj);
16226 
16227 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
16228 		/*
16229 		 * If there's a fence, enforce that
16230 		 * the fb modifier and tiling mode match.
16231 		 */
16232 		if (tiling != I915_TILING_NONE &&
16233 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16234 			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
16235 			goto err;
16236 		}
16237 	} else {
16238 		if (tiling == I915_TILING_X) {
16239 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
16240 		} else if (tiling == I915_TILING_Y) {
16241 			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
16242 			goto err;
16243 		}
16244 	}
16245 
16246 	if (!drm_any_plane_has_format(&dev_priv->drm,
16247 				      mode_cmd->pixel_format,
16248 				      mode_cmd->modifier[0])) {
16249 		struct drm_format_name_buf format_name;
16250 
16251 		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
16252 			      drm_get_format_name(mode_cmd->pixel_format,
16253 						  &format_name),
16254 			      mode_cmd->modifier[0]);
16255 		goto err;
16256 	}
16257 
16258 	/*
16259 	 * gen2/3 display engine uses the fence if present,
16260 	 * so the tiling mode must match the fb modifier exactly.
16261 	 */
16262 	if (INTEL_GEN(dev_priv) < 4 &&
16263 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
16264 		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
16265 		goto err;
16266 	}
16267 
16268 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
16269 					 mode_cmd->modifier[0]);
16270 	if (mode_cmd->pitches[0] > max_stride) {
16271 		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
16272 			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
16273 			      "tiled" : "linear",
16274 			      mode_cmd->pitches[0], max_stride);
16275 		goto err;
16276 	}
16277 
16278 	/*
16279 	 * If there's a fence, enforce that
16280 	 * the fb pitch and fence stride match.
16281 	 */
16282 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
16283 		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
16284 			      mode_cmd->pitches[0], stride);
16285 		goto err;
16286 	}
16287 
16288 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
16289 	if (mode_cmd->offsets[0] != 0)
16290 		goto err;
16291 
16292 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
16293 
16294 	for (i = 0; i < fb->format->num_planes; i++) {
16295 		u32 stride_alignment;
16296 
16297 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
16298 			DRM_DEBUG_KMS("bad plane %d handle\n", i);
16299 			goto err;
16300 		}
16301 
16302 		stride_alignment = intel_fb_stride_alignment(fb, i);
16303 
16304 		/*
16305 		 * Display WA #0531: skl,bxt,kbl,glk
16306 		 *
16307 		 * Render decompression and plane width > 3840
16308 		 * combined with horizontal panning requires the
16309 		 * plane stride to be a multiple of 4. We'll just
16310 		 * require the entire fb to accommodate that to avoid
16311 		 * potential runtime errors at plane configuration time.
16312 		 */
16313 		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
16314 		    is_ccs_modifier(fb->modifier))
16315 			stride_alignment *= 4;
16316 
16317 		if (fb->pitches[i] & (stride_alignment - 1)) {
16318 			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
16319 				      i, fb->pitches[i], stride_alignment);
16320 			goto err;
16321 		}
16322 
16323 		fb->obj[i] = &obj->base;
16324 	}
16325 
16326 	ret = intel_fill_fb_info(dev_priv, fb);
16327 	if (ret)
16328 		goto err;
16329 
16330 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
16331 	if (ret) {
16332 		DRM_ERROR("framebuffer init failed %d\n", ret);
16333 		goto err;
16334 	}
16335 
16336 	return 0;
16337 
16338 err:
16339 	intel_frontbuffer_put(intel_fb->frontbuffer);
16340 	return ret;
16341 }
16342 
16343 static struct drm_framebuffer *
16344 intel_user_framebuffer_create(struct drm_device *dev,
16345 			      struct drm_file *filp,
16346 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
16347 {
16348 	struct drm_framebuffer *fb;
16349 	struct drm_i915_gem_object *obj;
16350 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
16351 
16352 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
16353 	if (!obj)
16354 		return ERR_PTR(-ENOENT);
16355 
16356 	fb = intel_framebuffer_create(obj, &mode_cmd);
16357 	i915_gem_object_put(obj);
16358 
16359 	return fb;
16360 }
16361 
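/* Free an intel_atomic_state, including its embedded commit fence. */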
16362 static void intel_atomic_state_free(struct drm_atomic_state *state)
16363 {
16364 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
16365 
16366 	drm_atomic_state_default_release(state);
16367 
16368 	i915_sw_fence_fini(&intel_state->commit_ready);
16369 
16370 	kfree(state);
16371 }
16372 
16373 static enum drm_mode_status
16374 intel_mode_valid(struct drm_device *dev,
16375 		 const struct drm_display_mode *mode)
16376 {
16377 	struct drm_i915_private *dev_priv = to_i915(dev);
16378 	int hdisplay_max, htotal_max;
16379 	int vdisplay_max, vtotal_max;
16380 
16381 	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time, at which point the client gets a protocol error.
	 * So in order to not upset those clients, we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
16392 	 */
16393 
16394 	if (mode->vscan > 1)
16395 		return MODE_NO_VSCAN;
16396 
16397 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
16398 		return MODE_H_ILLEGAL;
16399 
16400 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
16401 			   DRM_MODE_FLAG_NCSYNC |
16402 			   DRM_MODE_FLAG_PCSYNC))
16403 		return MODE_HSYNC;
16404 
16405 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
16406 			   DRM_MODE_FLAG_PIXMUX |
16407 			   DRM_MODE_FLAG_CLKDIV2))
16408 		return MODE_BAD;
16409 
16410 	/* Transcoder timing limits */
16411 	if (INTEL_GEN(dev_priv) >= 11) {
16412 		hdisplay_max = 16384;
16413 		vdisplay_max = 8192;
16414 		htotal_max = 16384;
16415 		vtotal_max = 8192;
16416 	} else if (INTEL_GEN(dev_priv) >= 9 ||
16417 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
16418 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
16419 		vdisplay_max = 4096;
16420 		htotal_max = 8192;
16421 		vtotal_max = 8192;
16422 	} else if (INTEL_GEN(dev_priv) >= 3) {
16423 		hdisplay_max = 4096;
16424 		vdisplay_max = 4096;
16425 		htotal_max = 8192;
16426 		vtotal_max = 8192;
16427 	} else {
16428 		hdisplay_max = 2048;
16429 		vdisplay_max = 2048;
16430 		htotal_max = 4096;
16431 		vtotal_max = 4096;
16432 	}
16433 
16434 	if (mode->hdisplay > hdisplay_max ||
16435 	    mode->hsync_start > htotal_max ||
16436 	    mode->hsync_end > htotal_max ||
16437 	    mode->htotal > htotal_max)
16438 		return MODE_H_ILLEGAL;
16439 
16440 	if (mode->vdisplay > vdisplay_max ||
16441 	    mode->vsync_start > vtotal_max ||
16442 	    mode->vsync_end > vtotal_max ||
16443 	    mode->vtotal > vtotal_max)
16444 		return MODE_V_ILLEGAL;
16445 
16446 	if (INTEL_GEN(dev_priv) >= 5) {
16447 		if (mode->hdisplay < 64 ||
16448 		    mode->htotal - mode->hdisplay < 32)
16449 			return MODE_H_ILLEGAL;
16450 
16451 		if (mode->vtotal - mode->vdisplay < 5)
16452 			return MODE_V_ILLEGAL;
16453 	} else {
16454 		if (mode->htotal - mode->hdisplay < 32)
16455 			return MODE_H_ILLEGAL;
16456 
16457 		if (mode->vtotal - mode->vdisplay < 3)
16458 			return MODE_V_ILLEGAL;
16459 	}
16460 
16461 	return MODE_OK;
16462 }
16463 
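/**
 * intel_mode_valid_max_plane_size - filter modes exceeding max plane size
 * @dev_priv: device private
 * @mode: mode to validate
 *
 * Reject modes larger than the biggest plane the platform can scan out,
 * on the assumption that users want at least a fullscreen plane.
 */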
16464 enum drm_mode_status
16465 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
16466 				const struct drm_display_mode *mode)
16467 {
16468 	int plane_width_max, plane_height_max;
16469 
16470 	/*
16471 	 * intel_mode_valid() should be
16472 	 * sufficient on older platforms.
16473 	 */
16474 	if (INTEL_GEN(dev_priv) < 9)
16475 		return MODE_OK;
16476 
16477 	/*
	/*
	 * Most people will probably want a fullscreen
	 * plane, so let's not advertise modes that are
	 * too big for that.
	 */
16482 	if (INTEL_GEN(dev_priv) >= 11) {
16483 		plane_width_max = 5120;
16484 		plane_height_max = 4320;
16485 	} else {
16486 		plane_width_max = 5120;
16487 		plane_height_max = 4096;
16488 	}
16489 
16490 	if (mode->hdisplay > plane_width_max)
16491 		return MODE_H_ILLEGAL;
16492 
16493 	if (mode->vdisplay > plane_height_max)
16494 		return MODE_V_ILLEGAL;
16495 
16496 	return MODE_OK;
16497 }
16498 
16499 static const struct drm_mode_config_funcs intel_mode_funcs = {
16500 	.fb_create = intel_user_framebuffer_create,
16501 	.get_format_info = intel_get_format_info,
16502 	.output_poll_changed = intel_fbdev_output_poll_changed,
16503 	.mode_valid = intel_mode_valid,
16504 	.atomic_check = intel_atomic_check,
16505 	.atomic_commit = intel_atomic_commit,
16506 	.atomic_state_alloc = intel_atomic_state_alloc,
16507 	.atomic_state_clear = intel_atomic_state_clear,
16508 	.atomic_state_free = intel_atomic_state_free,
16509 };
16510 
16511 /**
16512  * intel_init_display_hooks - initialize the display modesetting hooks
16513  * @dev_priv: device private
16514  */
16515 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
16516 {
16517 	intel_init_cdclk_hooks(dev_priv);
16518 
16519 	if (INTEL_GEN(dev_priv) >= 9) {
16520 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16521 		dev_priv->display.get_initial_plane_config =
16522 			skylake_get_initial_plane_config;
16523 		dev_priv->display.crtc_compute_clock =
16524 			haswell_crtc_compute_clock;
16525 		dev_priv->display.crtc_enable = haswell_crtc_enable;
16526 		dev_priv->display.crtc_disable = haswell_crtc_disable;
16527 	} else if (HAS_DDI(dev_priv)) {
16528 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
16529 		dev_priv->display.get_initial_plane_config =
16530 			i9xx_get_initial_plane_config;
16531 		dev_priv->display.crtc_compute_clock =
16532 			haswell_crtc_compute_clock;
16533 		dev_priv->display.crtc_enable = haswell_crtc_enable;
16534 		dev_priv->display.crtc_disable = haswell_crtc_disable;
16535 	} else if (HAS_PCH_SPLIT(dev_priv)) {
16536 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
16537 		dev_priv->display.get_initial_plane_config =
16538 			i9xx_get_initial_plane_config;
16539 		dev_priv->display.crtc_compute_clock =
16540 			ironlake_crtc_compute_clock;
16541 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
16542 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
16543 	} else if (IS_CHERRYVIEW(dev_priv)) {
16544 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16545 		dev_priv->display.get_initial_plane_config =
16546 			i9xx_get_initial_plane_config;
16547 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
16548 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
16549 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16550 	} else if (IS_VALLEYVIEW(dev_priv)) {
16551 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16552 		dev_priv->display.get_initial_plane_config =
16553 			i9xx_get_initial_plane_config;
16554 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
16555 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
16556 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16557 	} else if (IS_G4X(dev_priv)) {
16558 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16559 		dev_priv->display.get_initial_plane_config =
16560 			i9xx_get_initial_plane_config;
16561 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
16562 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16563 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16564 	} else if (IS_PINEVIEW(dev_priv)) {
16565 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16566 		dev_priv->display.get_initial_plane_config =
16567 			i9xx_get_initial_plane_config;
16568 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
16569 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16570 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16571 	} else if (!IS_GEN(dev_priv, 2)) {
16572 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16573 		dev_priv->display.get_initial_plane_config =
16574 			i9xx_get_initial_plane_config;
16575 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
16576 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16577 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16578 	} else {
16579 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
16580 		dev_priv->display.get_initial_plane_config =
16581 			i9xx_get_initial_plane_config;
16582 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
16583 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
16584 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
16585 	}
16586 
16587 	if (IS_GEN(dev_priv, 5)) {
16588 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
16589 	} else if (IS_GEN(dev_priv, 6)) {
16590 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
16591 	} else if (IS_IVYBRIDGE(dev_priv)) {
16592 		/* FIXME: detect B0+ stepping and use auto training */
16593 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
16594 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
16595 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
16596 	}
16597 
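	/*
	 * skl+ must enable pipes in an order that avoids overlapping
	 * DDB allocations; everything older can enable them one by one.
	 */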
16598 	if (INTEL_GEN(dev_priv) >= 9)
16599 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
16600 	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
}
16604 
16605 void intel_modeset_init_hw(struct drm_i915_private *i915)
16606 {
16607 	intel_update_cdclk(i915);
16608 	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
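	/* Start out with the logical and actual cdclk state matching the hw */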
16609 	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
16610 }
16611 
16612 /*
16613  * Calculate what we think the watermarks should be for the state we've read
16614  * out of the hardware and then immediately program those watermarks so that
16615  * we ensure the hardware settings match our internal state.
16616  *
 * We can calculate what we think WMs should be by creating a duplicate of the
16618  * current state (which was constructed during hardware readout) and running it
16619  * through the atomic check code to calculate new watermark values in the
16620  * state object.
16621  */
16622 static void sanitize_watermarks(struct drm_device *dev)
16623 {
16624 	struct drm_i915_private *dev_priv = to_i915(dev);
16625 	struct drm_atomic_state *state;
16626 	struct intel_atomic_state *intel_state;
16627 	struct intel_crtc *crtc;
16628 	struct intel_crtc_state *crtc_state;
16629 	struct drm_modeset_acquire_ctx ctx;
16630 	int ret;
16631 	int i;
16632 
16633 	/* Only supported on platforms that use atomic watermark design */
16634 	if (!dev_priv->display.optimize_watermarks)
16635 		return;
16636 
16637 	/*
16638 	 * We need to hold connection_mutex before calling duplicate_state so
16639 	 * that the connector loop is protected.
16640 	 */
16641 	drm_modeset_acquire_init(&ctx, 0);
16642 retry:
16643 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
16644 	if (ret == -EDEADLK) {
16645 		drm_modeset_backoff(&ctx);
16646 		goto retry;
16647 	} else if (WARN_ON(ret)) {
16648 		goto fail;
16649 	}
16650 
16651 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
16652 	if (WARN_ON(IS_ERR(state)))
16653 		goto fail;
16654 
16655 	intel_state = to_intel_atomic_state(state);
16656 
16657 	/*
16658 	 * Hardware readout is the only time we don't want to calculate
16659 	 * intermediate watermarks (since we don't trust the current
16660 	 * watermarks).
16661 	 */
16662 	if (!HAS_GMCH(dev_priv))
16663 		intel_state->skip_intermediate_wm = true;
16664 
16665 	ret = intel_atomic_check(dev, state);
16666 	if (ret) {
16667 		/*
16668 		 * If we fail here, it means that the hardware appears to be
16669 		 * programmed in a way that shouldn't be possible, given our
16670 		 * understanding of watermark requirements.  This might mean a
16671 		 * mistake in the hardware readout code or a mistake in the
16672 		 * watermark calculations for a given platform.  Raise a WARN
16673 		 * so that this is noticeable.
16674 		 *
16675 		 * If this actually happens, we'll have to just leave the
16676 		 * BIOS-programmed watermarks untouched and hope for the best.
16677 		 */
16678 		WARN(true, "Could not determine valid watermarks for inherited state\n");
16679 		goto put_state;
16680 	}
16681 
16682 	/* Write calculated watermark values back */
16683 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
16684 		crtc_state->wm.need_postvbl_update = true;
16685 		dev_priv->display.optimize_watermarks(intel_state, crtc_state);
16686 
16687 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
16688 	}
16689 
16690 put_state:
16691 	drm_atomic_state_put(state);
16692 fail:
16693 	drm_modeset_drop_locks(&ctx);
16694 	drm_modeset_acquire_fini(&ctx);
16695 }
16696 
16697 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
16698 {
16699 	if (IS_GEN(dev_priv, 5)) {
16700 		u32 fdi_pll_clk =
16701 			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
16702 
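		/* FDI PLL runs at (feedback divider + 2) * 10 MHz; store it in kHz */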
16703 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
16704 	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
16705 		dev_priv->fdi_pll_freq = 270000;
16706 	} else {
16707 		return;
16708 	}
16709 
16710 	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
16711 }
16712 
16713 static int intel_initial_commit(struct drm_device *dev)
16714 {
16715 	struct drm_atomic_state *state = NULL;
16716 	struct drm_modeset_acquire_ctx ctx;
16717 	struct drm_crtc *crtc;
16718 	struct drm_crtc_state *crtc_state;
16719 	int ret = 0;
16720 
16721 	state = drm_atomic_state_alloc(dev);
16722 	if (!state)
16723 		return -ENOMEM;
16724 
16725 	drm_modeset_acquire_init(&ctx, 0);
16726 
16727 retry:
16728 	state->acquire_ctx = &ctx;
16729 
16730 	drm_for_each_crtc(crtc, dev) {
16731 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
16732 		if (IS_ERR(crtc_state)) {
16733 			ret = PTR_ERR(crtc_state);
16734 			goto out;
16735 		}
16736 
16737 		if (crtc_state->active) {
16738 			ret = drm_atomic_add_affected_planes(state, crtc);
16739 			if (ret)
16740 				goto out;
16741 
16742 			/*
16743 			 * FIXME hack to force a LUT update to avoid the
16744 			 * plane update forcing the pipe gamma on without
16745 			 * having a proper LUT loaded. Remove once we
16746 			 * have readout for pipe gamma enable.
16747 			 */
16748 			crtc_state->color_mgmt_changed = true;
16749 		}
16750 	}
16751 
16752 	ret = drm_atomic_commit(state);
16753 
16754 out:
16755 	if (ret == -EDEADLK) {
16756 		drm_atomic_state_clear(state);
16757 		drm_modeset_backoff(&ctx);
16758 		goto retry;
16759 	}
16760 
16761 	drm_atomic_state_put(state);
16762 
16763 	drm_modeset_drop_locks(&ctx);
16764 	drm_modeset_acquire_fini(&ctx);
16765 
16766 	return ret;
16767 }
16768 
16769 static void intel_mode_config_init(struct drm_i915_private *i915)
16770 {
16771 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
16772 
16773 	drm_mode_config_init(&i915->drm);
16774 
16775 	mode_config->min_width = 0;
16776 	mode_config->min_height = 0;
16777 
16778 	mode_config->preferred_depth = 24;
16779 	mode_config->prefer_shadow = 1;
16780 
16781 	mode_config->allow_fb_modifiers = true;
16782 
16783 	mode_config->funcs = &intel_mode_funcs;
16784 
16785 	/*
16786 	 * Maximum framebuffer dimensions, chosen to match
16787 	 * the maximum render engine surface size on gen4+.
16788 	 */
16789 	if (INTEL_GEN(i915) >= 7) {
16790 		mode_config->max_width = 16384;
16791 		mode_config->max_height = 16384;
16792 	} else if (INTEL_GEN(i915) >= 4) {
16793 		mode_config->max_width = 8192;
16794 		mode_config->max_height = 8192;
16795 	} else if (IS_GEN(i915, 3)) {
16796 		mode_config->max_width = 4096;
16797 		mode_config->max_height = 4096;
16798 	} else {
16799 		mode_config->max_width = 2048;
16800 		mode_config->max_height = 2048;
16801 	}
16802 
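	/*
	 * 845g/865g cursors have a variable height (up to 1023 lines);
	 * advertise the maximum size userspace may ask for.
	 */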
16803 	if (IS_I845G(i915) || IS_I865G(i915)) {
16804 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
16805 		mode_config->cursor_height = 1023;
16806 	} else if (IS_GEN(i915, 2)) {
16807 		mode_config->cursor_width = 64;
16808 		mode_config->cursor_height = 64;
16809 	} else {
16810 		mode_config->cursor_width = 256;
16811 		mode_config->cursor_height = 256;
16812 	}
16813 }
16814 
16815 int intel_modeset_init(struct drm_i915_private *i915)
16816 {
16817 	struct drm_device *dev = &i915->drm;
16818 	enum pipe pipe;
16819 	struct intel_crtc *crtc;
16820 	int ret;
16821 
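	/*
	 * The modeset wq is ordered so commits that need a full modeset are
	 * serialized; plain flips go through a separate high priority wq.
	 */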
16822 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
16823 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
16824 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
16825 
16826 	intel_mode_config_init(i915);
16827 
16828 	ret = intel_bw_init(i915);
16829 	if (ret)
16830 		return ret;
16831 
16832 	init_llist_head(&i915->atomic_helper.free_list);
16833 	INIT_WORK(&i915->atomic_helper.free_work,
16834 		  intel_atomic_helper_free_state_worker);
16835 
16836 	intel_init_quirks(i915);
16837 
16838 	intel_fbc_init(i915);
16839 
16840 	intel_init_pm(i915);
16841 
16842 	intel_panel_sanitize_ssc(i915);
16843 
16844 	intel_gmbus_setup(i915);
16845 
16846 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
16847 		      INTEL_NUM_PIPES(i915),
16848 		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
16849 
16850 	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
16851 		for_each_pipe(i915, pipe) {
16852 			ret = intel_crtc_init(i915, pipe);
16853 			if (ret) {
16854 				drm_mode_config_cleanup(dev);
16855 				return ret;
16856 			}
16857 		}
16858 	}
16859 
16860 	intel_shared_dpll_init(dev);
16861 	intel_update_fdi_pll_freq(i915);
16862 
16863 	intel_update_czclk(i915);
16864 	intel_modeset_init_hw(i915);
16865 
16866 	intel_hdcp_component_init(i915);
16867 
16868 	if (i915->max_cdclk_freq == 0)
16869 		intel_update_max_cdclk(i915);
16870 
16871 	/* Just disable it once at startup */
16872 	intel_vga_disable(i915);
16873 	intel_setup_outputs(i915);
16874 
16875 	drm_modeset_lock_all(dev);
16876 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
16877 	drm_modeset_unlock_all(dev);
16878 
16879 	for_each_intel_crtc(dev, crtc) {
16880 		struct intel_initial_plane_config plane_config = {};
16881 
16882 		if (!crtc->active)
16883 			continue;
16884 
16885 		/*
16886 		 * Note that reserving the BIOS fb up front prevents us
16887 		 * from stuffing other stolen allocations like the ring
16888 		 * on top.  This prevents some ugliness at boot time, and
16889 		 * can even allow for smooth boot transitions if the BIOS
16890 		 * fb is large enough for the active pipe configuration.
16891 		 */
16892 		i915->display.get_initial_plane_config(crtc, &plane_config);
16893 
16894 		/*
16895 		 * If the fb is shared between multiple heads, we'll
16896 		 * just get the first one.
16897 		 */
16898 		intel_find_initial_plane_obj(crtc, &plane_config);
16899 	}
16900 
16901 	/*
16902 	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fbs
16904 	 * since the watermark calculation done here will use pstate->fb.
16905 	 */
16906 	if (!HAS_GMCH(i915))
16907 		sanitize_watermarks(dev);
16908 
16909 	/*
	 * Force all active planes to recompute their states, so that on
	 * mode_setcrtc after probe all the intel_plane_state variables
	 * are already calculated and there are no assert_plane warnings
	 * during bootup.
16914 	 */
16915 	ret = intel_initial_commit(dev);
16916 	if (ret)
16917 		DRM_DEBUG_KMS("Initial commit in probe failed.\n");
16918 
16919 	return 0;
16920 }
16921 
16922 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16923 {
16924 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16925 	/* 640x480@60Hz, ~25175 kHz */
16926 	struct dpll clock = {
16927 		.m1 = 18,
16928 		.m2 = 7,
16929 		.p1 = 13,
16930 		.p2 = 4,
16931 		.n = 2,
16932 	};
16933 	u32 dpll, fp;
16934 	int i;
16935 
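	/*
	 * With the i9xx divider rules (m = 5 * (m1 + 2) + (m2 + 2),
	 * vco = refclk * m / (n + 2), dot = vco / (p1 * p2)) the values
	 * above give 48000 * 109 / 4 / 52 = 25154 kHz.
	 */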
16936 	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
16937 
16938 	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
16939 		      pipe_name(pipe), clock.vco, clock.dot);
16940 
16941 	fp = i9xx_dpll_compute_fp(&clock);
16942 	dpll = DPLL_DVO_2X_MODE |
16943 		DPLL_VGA_MODE_DIS |
16944 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
16945 		PLL_P2_DIVIDE_BY_4 |
16946 		PLL_REF_INPUT_DREFCLK |
16947 		DPLL_VCO_ENABLE;
16948 
16949 	I915_WRITE(FP0(pipe), fp);
16950 	I915_WRITE(FP1(pipe), fp);
16951 
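	/*
	 * Standard VGA 640x480@60 timings: 800 pixels per line with hsync
	 * at 656-752, 525 lines per frame with vsync at 490-492.
	 */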
16952 	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
16953 	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
16954 	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
16955 	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
16956 	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
16957 	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
16958 	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
16959 
16960 	/*
16961 	 * Apparently we need to have VGA mode enabled prior to changing
16962 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
16963 	 * dividers, even though the register value does change.
16964 	 */
16965 	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
16966 	I915_WRITE(DPLL(pipe), dpll);
16967 
16968 	/* Wait for the clocks to stabilize. */
16969 	POSTING_READ(DPLL(pipe));
16970 	udelay(150);
16971 
16972 	/* The pixel multiplier can only be updated once the
16973 	 * DPLL is enabled and the clocks are stable.
16974 	 *
16975 	 * So write it again.
16976 	 */
16977 	I915_WRITE(DPLL(pipe), dpll);
16978 
16979 	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
16981 		I915_WRITE(DPLL(pipe), dpll);
16982 		POSTING_READ(DPLL(pipe));
16983 		udelay(150); /* wait for warmup */
16984 	}
16985 
16986 	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
16987 	POSTING_READ(PIPECONF(pipe));
16988 
16989 	intel_wait_for_pipe_scanline_moving(crtc);
16990 }
16991 
16992 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16993 {
16994 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16995 
16996 	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
16997 		      pipe_name(pipe));
16998 
16999 	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
17000 	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
17001 	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
17002 	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
17003 	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
17004 
17005 	I915_WRITE(PIPECONF(pipe), 0);
17006 	POSTING_READ(PIPECONF(pipe));
17007 
17008 	intel_wait_for_pipe_scanline_stopped(crtc);
17009 
17010 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
17011 	POSTING_READ(DPLL(pipe));
17012 }
17013 
17014 static void
17015 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
17016 {
17017 	struct intel_crtc *crtc;
17018 
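	/*
	 * Pre-gen4 primary planes have a pipe select bit, so the BIOS may
	 * have attached a plane to the "wrong" pipe; gen4+ uses a fixed
	 * plane<->pipe mapping and needs no sanitizing.
	 */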
17019 	if (INTEL_GEN(dev_priv) >= 4)
17020 		return;
17021 
17022 	for_each_intel_crtc(&dev_priv->drm, crtc) {
17023 		struct intel_plane *plane =
17024 			to_intel_plane(crtc->base.primary);
17025 		struct intel_crtc *plane_crtc;
17026 		enum pipe pipe;
17027 
17028 		if (!plane->get_hw_state(plane, &pipe))
17029 			continue;
17030 
17031 		if (pipe == crtc->pipe)
17032 			continue;
17033 
17034 		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
17035 			      plane->base.base.id, plane->base.name);
17036 
17037 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17038 		intel_plane_disable_noatomic(plane_crtc, plane);
17039 	}
17040 }
17041 
17042 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
17043 {
17044 	struct drm_device *dev = crtc->base.dev;
17045 	struct intel_encoder *encoder;
17046 
17047 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
17048 		return true;
17049 
17050 	return false;
17051 }
17052 
17053 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
17054 {
17055 	struct drm_device *dev = encoder->base.dev;
17056 	struct intel_connector *connector;
17057 
17058 	for_each_connector_on_encoder(dev, &encoder->base, connector)
17059 		return connector;
17060 
17061 	return NULL;
17062 }
17063 
static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum pipe pch_transcoder)
17066 {
17067 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
17068 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
17069 }
17070 
17071 static void intel_sanitize_crtc(struct intel_crtc *crtc,
17072 				struct drm_modeset_acquire_ctx *ctx)
17073 {
17074 	struct drm_device *dev = crtc->base.dev;
17075 	struct drm_i915_private *dev_priv = to_i915(dev);
17076 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
17077 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
17078 
	/* Clear any frame start delays (used for debugging) left over by the BIOS */
17080 	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
17081 		i915_reg_t reg = PIPECONF(cpu_transcoder);
17082 
17083 		I915_WRITE(reg,
17084 			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
17085 	}
17086 
17087 	if (crtc_state->base.active) {
17088 		struct intel_plane *plane;
17089 
17090 		/* Disable everything but the primary plane */
17091 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
17092 			const struct intel_plane_state *plane_state =
17093 				to_intel_plane_state(plane->base.state);
17094 
17095 			if (plane_state->base.visible &&
17096 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
17097 				intel_plane_disable_noatomic(crtc, plane);
17098 		}
17099 
17100 		/*
17101 		 * Disable any background color set by the BIOS, but enable the
17102 		 * gamma and CSC to match how we program our planes.
17103 		 */
17104 		if (INTEL_GEN(dev_priv) >= 9)
17105 			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
17106 				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
17107 				   SKL_BOTTOM_COLOR_CSC_ENABLE);
17108 	}
17109 
17110 	/* Adjust the state of the output pipe according to whether we
17111 	 * have active connectors/encoders. */
17112 	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
17113 		intel_crtc_disable_noatomic(&crtc->base, ctx);
17114 
17115 	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
17116 		/*
17117 		 * We start out with underrun reporting disabled to avoid races.
17118 		 * For correct bookkeeping mark this on active crtcs.
17119 		 *
		 * Also on gmch platforms we don't have any hardware bits to
17121 		 * disable the underrun reporting. Which means we need to start
17122 		 * out with underrun reporting disabled also on inactive pipes,
17123 		 * since otherwise we'll complain about the garbage we read when
17124 		 * e.g. coming up after runtime pm.
17125 		 *
17126 		 * No protection against concurrent access is required - at
17127 		 * worst a fifo underrun happens which also sets this to false.
17128 		 */
17129 		crtc->cpu_fifo_underrun_disabled = true;
17130 		/*
		 * We track the PCH transcoder underrun reporting state
17132 		 * within the crtc. With crtc for pipe A housing the underrun
17133 		 * reporting state for PCH transcoder A, crtc for pipe B housing
17134 		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
17135 		 * and marking underrun reporting as disabled for the non-existing
17136 		 * PCH transcoders B and C would prevent enabling the south
17137 		 * error interrupt (see cpt_can_enable_serr_int()).
17138 		 */
		if (has_pch_transcoder(dev_priv, crtc->pipe))
17140 			crtc->pch_fifo_underrun_disabled = true;
17141 	}
17142 }
17143 
17144 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
17145 {
17146 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
17147 
17148 	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
17151 	 * divider is zero, and the pipe timings are bonkers. We'll
17152 	 * try to disable everything in that case.
17153 	 *
17154 	 * FIXME would be nice to be able to sanitize this state
17155 	 * without several WARNs, but for now let's take the easy
17156 	 * road.
17157 	 */
17158 	return IS_GEN(dev_priv, 6) &&
17159 		crtc_state->base.active &&
17160 		crtc_state->shared_dpll &&
17161 		crtc_state->port_clock == 0;
17162 }
17163 
17164 static void intel_sanitize_encoder(struct intel_encoder *encoder)
17165 {
17166 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
17167 	struct intel_connector *connector;
17168 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
17169 	struct intel_crtc_state *crtc_state = crtc ?
17170 		to_intel_crtc_state(crtc->base.state) : NULL;
17171 
17172 	/* We need to check both for a crtc link (meaning that the
17173 	 * encoder is active and trying to read from a pipe) and the
17174 	 * pipe itself being active. */
17175 	bool has_active_crtc = crtc_state &&
17176 		crtc_state->base.active;
17177 
17178 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
17179 		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
17180 			      pipe_name(crtc->pipe));
17181 		has_active_crtc = false;
17182 	}
17183 
17184 	connector = intel_encoder_find_connector(encoder);
17185 	if (connector && !has_active_crtc) {
17186 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
17187 			      encoder->base.base.id,
17188 			      encoder->base.name);
17189 
17190 		/* Connector is active, but has no active pipe. This is
17191 		 * fallout from our resume register restoring. Disable
17192 		 * the encoder manually again. */
17193 		if (crtc_state) {
17194 			struct drm_encoder *best_encoder;
17195 
17196 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
17197 				      encoder->base.base.id,
17198 				      encoder->base.name);
17199 
17200 			/* avoid oopsing in case the hooks consult best_encoder */
17201 			best_encoder = connector->base.state->best_encoder;
17202 			connector->base.state->best_encoder = &encoder->base;
17203 
17204 			if (encoder->disable)
17205 				encoder->disable(encoder, crtc_state,
17206 						 connector->base.state);
17207 			if (encoder->post_disable)
17208 				encoder->post_disable(encoder, crtc_state,
17209 						      connector->base.state);
17210 
17211 			connector->base.state->best_encoder = best_encoder;
17212 		}
17213 		encoder->base.crtc = NULL;
17214 
		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions, or someplace else
17217 		 * in our code, like the register restore mess on resume. Clamp
17218 		 * things to off as a safer default. */
17219 
17220 		connector->base.dpms = DRM_MODE_DPMS_OFF;
17221 		connector->base.encoder = NULL;
17222 	}
17223 
17224 	/* notify opregion of the sanitized encoder state */
17225 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
17226 
17227 	if (INTEL_GEN(dev_priv) >= 11)
17228 		icl_sanitize_encoder_pll_mapping(encoder);
17229 }
17230 
17231 /* FIXME read out full plane state for all planes */
17232 static void readout_plane_state(struct drm_i915_private *dev_priv)
17233 {
17234 	struct intel_plane *plane;
17235 	struct intel_crtc *crtc;
17236 
17237 	for_each_intel_plane(&dev_priv->drm, plane) {
17238 		struct intel_plane_state *plane_state =
17239 			to_intel_plane_state(plane->base.state);
17240 		struct intel_crtc_state *crtc_state;
17241 		enum pipe pipe = PIPE_A;
17242 		bool visible;
17243 
17244 		visible = plane->get_hw_state(plane, &pipe);
17245 
17246 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17247 		crtc_state = to_intel_crtc_state(crtc->base.state);
17248 
17249 		intel_set_plane_visible(crtc_state, plane_state, visible);
17250 
17251 		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
17252 			      plane->base.base.id, plane->base.name,
17253 			      enableddisabled(visible), pipe_name(pipe));
17254 	}
17255 
17256 	for_each_intel_crtc(&dev_priv->drm, crtc) {
17257 		struct intel_crtc_state *crtc_state =
17258 			to_intel_crtc_state(crtc->base.state);
17259 
17260 		fixup_active_planes(crtc_state);
17261 	}
17262 }
17263 
17264 static void intel_modeset_readout_hw_state(struct drm_device *dev)
17265 {
17266 	struct drm_i915_private *dev_priv = to_i915(dev);
17267 	enum pipe pipe;
17268 	struct intel_crtc *crtc;
17269 	struct intel_encoder *encoder;
17270 	struct intel_connector *connector;
17271 	struct drm_connector_list_iter conn_iter;
17272 	int i;
17273 
17274 	dev_priv->active_pipes = 0;
17275 
17276 	for_each_intel_crtc(dev, crtc) {
17277 		struct intel_crtc_state *crtc_state =
17278 			to_intel_crtc_state(crtc->base.state);
17279 
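		/*
		 * Wipe the staged sw state but keep the allocation: release
		 * the old state's references, clear it, and re-link it to
		 * the crtc.
		 */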
17280 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
17281 		memset(crtc_state, 0, sizeof(*crtc_state));
17282 		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
17283 
17284 		crtc_state->base.active = crtc_state->base.enable =
17285 			dev_priv->display.get_pipe_config(crtc, crtc_state);
17286 
17287 		crtc->base.enabled = crtc_state->base.enable;
17288 		crtc->active = crtc_state->base.active;
17289 
17290 		if (crtc_state->base.active)
17291 			dev_priv->active_pipes |= BIT(crtc->pipe);
17292 
17293 		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
17294 			      crtc->base.base.id, crtc->base.name,
17295 			      enableddisabled(crtc_state->base.active));
17296 	}
17297 
17298 	readout_plane_state(dev_priv);
17299 
17300 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
17301 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
17302 
17303 		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
17304 							&pll->state.hw_state);
17305 
17306 		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
17307 		    pll->info->id == DPLL_ID_EHL_DPLL4) {
17308 			pll->wakeref = intel_display_power_get(dev_priv,
17309 							       POWER_DOMAIN_DPLL_DC_OFF);
17310 		}
17311 
17312 		pll->state.crtc_mask = 0;
17313 		for_each_intel_crtc(dev, crtc) {
17314 			struct intel_crtc_state *crtc_state =
17315 				to_intel_crtc_state(crtc->base.state);
17316 
17317 			if (crtc_state->base.active &&
17318 			    crtc_state->shared_dpll == pll)
17319 				pll->state.crtc_mask |= 1 << crtc->pipe;
17320 		}
17321 		pll->active_mask = pll->state.crtc_mask;
17322 
17323 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
17324 			      pll->info->name, pll->state.crtc_mask, pll->on);
17325 	}
17326 
17327 	for_each_intel_encoder(dev, encoder) {
17328 		pipe = 0;
17329 
17330 		if (encoder->get_hw_state(encoder, &pipe)) {
17331 			struct intel_crtc_state *crtc_state;
17332 
17333 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
17334 			crtc_state = to_intel_crtc_state(crtc->base.state);
17335 
17336 			encoder->base.crtc = &crtc->base;
17337 			encoder->get_config(encoder, crtc_state);
17338 		} else {
17339 			encoder->base.crtc = NULL;
17340 		}
17341 
17342 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
17343 			      encoder->base.base.id, encoder->base.name,
17344 			      enableddisabled(encoder->base.crtc),
17345 			      pipe_name(pipe));
17346 	}
17347 
17348 	drm_connector_list_iter_begin(dev, &conn_iter);
17349 	for_each_intel_connector_iter(connector, &conn_iter) {
17350 		if (connector->get_hw_state(connector)) {
17351 			struct intel_crtc_state *crtc_state;
17352 			struct intel_crtc *crtc;
17353 
17354 			connector->base.dpms = DRM_MODE_DPMS_ON;
17355 
17356 			encoder = connector->encoder;
17357 			connector->base.encoder = &encoder->base;
17358 
17359 			crtc = to_intel_crtc(encoder->base.crtc);
17360 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
17361 
17362 			if (crtc_state && crtc_state->base.active) {
17363 				/*
17364 				 * This has to be done during hardware readout
17365 				 * because anything calling .crtc_disable may
17366 				 * rely on the connector_mask being accurate.
17367 				 */
17368 				crtc_state->base.connector_mask |=
17369 					drm_connector_mask(&connector->base);
17370 				crtc_state->base.encoder_mask |=
17371 					drm_encoder_mask(&encoder->base);
17372 			}
17373 		} else {
17374 			connector->base.dpms = DRM_MODE_DPMS_OFF;
17375 			connector->base.encoder = NULL;
17376 		}
17377 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
17378 			      connector->base.base.id, connector->base.name,
17379 			      enableddisabled(connector->base.encoder));
17380 	}
17381 	drm_connector_list_iter_end(&conn_iter);
17382 
17383 	for_each_intel_crtc(dev, crtc) {
17384 		struct intel_bw_state *bw_state =
17385 			to_intel_bw_state(dev_priv->bw_obj.state);
17386 		struct intel_crtc_state *crtc_state =
17387 			to_intel_crtc_state(crtc->base.state);
17388 		struct intel_plane *plane;
17389 		int min_cdclk = 0;
17390 
17391 		if (crtc_state->base.active) {
17392 			struct drm_display_mode mode;
17393 
17394 			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode,
17395 						    crtc_state);
17396 
17397 			mode = crtc_state->base.adjusted_mode;
17398 			mode.hdisplay = crtc_state->pipe_src_w;
17399 			mode.vdisplay = crtc_state->pipe_src_h;
17400 			WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode));
17401 
17402 			/*
17403 			 * The initial mode needs to be set in order to keep
17404 			 * the atomic core happy. It wants a valid mode if the
17405 			 * crtc's enabled, so we do the above call.
17406 			 *
17407 			 * But we don't set all the derived state fully, hence
17408 			 * set a flag to indicate that a full recalculation is
17409 			 * needed on the next commit.
17410 			 */
17411 			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
17412 
17413 			intel_crtc_compute_pixel_rate(crtc_state);
17414 
17415 			intel_crtc_update_active_timings(crtc_state);
17416 		}
17417 
17418 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
17419 			const struct intel_plane_state *plane_state =
17420 				to_intel_plane_state(plane->base.state);
17421 
17422 			/*
17423 			 * FIXME don't have the fb yet, so can't
17424 			 * use intel_plane_data_rate() :(
17425 			 */
17426 			if (plane_state->base.visible)
17427 				crtc_state->data_rate[plane->id] =
17428 					4 * crtc_state->pixel_rate;
17429 			/*
17430 			 * FIXME don't have the fb yet, so can't
17431 			 * use plane->min_cdclk() :(
17432 			 */
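			/*
			 * glk+ and double wide pipes push two pixels per
			 * cdclk cycle; everything else needs cdclk >= the
			 * pixel rate.
			 */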
17433 			if (plane_state->base.visible && plane->min_cdclk) {
17434 				if (crtc_state->double_wide ||
17435 				    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
17436 					crtc_state->min_cdclk[plane->id] =
17437 						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
17438 				else
17439 					crtc_state->min_cdclk[plane->id] =
17440 						crtc_state->pixel_rate;
17441 			}
17442 			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
17443 				      plane->base.base.id, plane->base.name,
17444 				      crtc_state->min_cdclk[plane->id]);
17445 		}
17446 
17447 		if (crtc_state->base.active) {
17448 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
17449 			if (WARN_ON(min_cdclk < 0))
17450 				min_cdclk = 0;
17451 		}
17452 
17453 		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
17454 		dev_priv->min_voltage_level[crtc->pipe] =
17455 			crtc_state->min_voltage_level;
17456 
17457 		intel_bw_crtc_update(bw_state, crtc_state);
17458 
17459 		intel_pipe_config_sanity_check(dev_priv, crtc_state);
17460 	}
17461 }
17462 
17463 static void
17464 get_encoder_power_domains(struct drm_i915_private *dev_priv)
17465 {
17466 	struct intel_encoder *encoder;
17467 
17468 	for_each_intel_encoder(&dev_priv->drm, encoder) {
17469 		struct intel_crtc_state *crtc_state;
17470 
17471 		if (!encoder->get_power_domains)
17472 			continue;
17473 
17474 		/*
17475 		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these requires any power domain references.
17477 		 */
17478 		if (!encoder->base.crtc)
17479 			continue;
17480 
17481 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
17482 		encoder->get_power_domains(encoder, crtc_state);
17483 	}
17484 }
17485 
17486 static void intel_early_display_was(struct drm_i915_private *dev_priv)
17487 {
17488 	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
17489 	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
17490 		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
17491 			   DARBF_GATING_DIS);
17492 
17493 	if (IS_HASWELL(dev_priv)) {
17494 		/*
17495 		 * WaRsPkgCStateDisplayPMReq:hsw
17496 		 * System hang if this isn't done before disabling all planes!
17497 		 */
17498 		I915_WRITE(CHICKEN_PAR1_1,
17499 			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
17500 	}
17501 }
17502 
17503 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
17504 				       enum port port, i915_reg_t hdmi_reg)
17505 {
17506 	u32 val = I915_READ(hdmi_reg);
17507 
17508 	if (val & SDVO_ENABLE ||
17509 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
17510 		return;
17511 
17512 	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
17513 		      port_name(port));
17514 
17515 	val &= ~SDVO_PIPE_SEL_MASK;
17516 	val |= SDVO_PIPE_SEL(PIPE_A);
17517 
17518 	I915_WRITE(hdmi_reg, val);
17519 }
17520 
17521 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
17522 				     enum port port, i915_reg_t dp_reg)
17523 {
17524 	u32 val = I915_READ(dp_reg);
17525 
17526 	if (val & DP_PORT_EN ||
17527 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
17528 		return;
17529 
17530 	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
17531 		      port_name(port));
17532 
17533 	val &= ~DP_PIPE_SEL_MASK;
17534 	val |= DP_PIPE_SEL(PIPE_A);
17535 
17536 	I915_WRITE(dp_reg, val);
17537 }
17538 
17539 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
17540 {
17541 	/*
17542 	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
17544 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
17545 	 * Sanitize the transcoder select bits to prevent that. We
17546 	 * assume that the BIOS never actually enabled the port,
17547 	 * because if it did we'd actually have to toggle the port
17548 	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
17550 	 * intel_disable_sdvo()).
17551 	 */
17552 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
17553 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
17554 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
17555 
	/* PCH SDVOB is multiplexed with HDMIB */
17557 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
17558 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
17559 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
17560 }
17561 
/*
 * Scan out the current hw modeset state and sanitize it into a state the
 * driver can handle, disabling anything found to be inconsistent.
 */
17565 static void
17566 intel_modeset_setup_hw_state(struct drm_device *dev,
17567 			     struct drm_modeset_acquire_ctx *ctx)
17568 {
17569 	struct drm_i915_private *dev_priv = to_i915(dev);
17570 	struct intel_crtc_state *crtc_state;
17571 	struct intel_encoder *encoder;
17572 	struct intel_crtc *crtc;
17573 	intel_wakeref_t wakeref;
17574 	int i;
17575 
17576 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
17577 
17578 	intel_early_display_was(dev_priv);
17579 	intel_modeset_readout_hw_state(dev);
17580 
17581 	/* HW state is read out, now we need to sanitize this mess. */
17582 
17583 	/* Sanitize the TypeC port mode upfront, encoders depend on this */
17584 	for_each_intel_encoder(dev, encoder) {
17585 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
17586 
17587 		/* We need to sanitize only the MST primary port. */
17588 		if (encoder->type != INTEL_OUTPUT_DP_MST &&
17589 		    intel_phy_is_tc(dev_priv, phy))
17590 			intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
17591 	}
17592 
17593 	get_encoder_power_domains(dev_priv);
17594 
17595 	if (HAS_PCH_IBX(dev_priv))
17596 		ibx_sanitize_pch_ports(dev_priv);
17597 
17598 	/*
17599 	 * intel_sanitize_plane_mapping() may need to do vblank
17600 	 * waits, so we need vblank interrupts restored beforehand.
17601 	 */
17602 	for_each_intel_crtc(&dev_priv->drm, crtc) {
17603 		crtc_state = to_intel_crtc_state(crtc->base.state);
17604 
17605 		drm_crtc_vblank_reset(&crtc->base);
17606 
17607 		if (crtc_state->base.active)
17608 			intel_crtc_vblank_on(crtc_state);
17609 	}
17610 
17611 	intel_sanitize_plane_mapping(dev_priv);
17612 
17613 	for_each_intel_encoder(dev, encoder)
17614 		intel_sanitize_encoder(encoder);
17615 
17616 	for_each_intel_crtc(&dev_priv->drm, crtc) {
17617 		crtc_state = to_intel_crtc_state(crtc->base.state);
17618 		intel_sanitize_crtc(crtc, ctx);
17619 		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
17620 	}
17621 
17622 	intel_modeset_update_connector_atomic_state(dev);
17623 
17624 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
17625 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
17626 
17627 		if (!pll->on || pll->active_mask)
17628 			continue;
17629 
17630 		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
17631 			      pll->info->name);
17632 
17633 		pll->info->funcs->disable(dev_priv, pll);
17634 		pll->on = false;
17635 	}
17636 
17637 	if (IS_G4X(dev_priv)) {
17638 		g4x_wm_get_hw_state(dev_priv);
17639 		g4x_wm_sanitize(dev_priv);
17640 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17641 		vlv_wm_get_hw_state(dev_priv);
17642 		vlv_wm_sanitize(dev_priv);
17643 	} else if (INTEL_GEN(dev_priv) >= 9) {
17644 		skl_wm_get_hw_state(dev_priv);
17645 	} else if (HAS_PCH_SPLIT(dev_priv)) {
17646 		ilk_wm_get_hw_state(dev_priv);
17647 	}
17648 
17649 	for_each_intel_crtc(dev, crtc) {
17650 		u64 put_domains;
17651 
17652 		crtc_state = to_intel_crtc_state(crtc->base.state);
17653 		put_domains = modeset_get_crtc_power_domains(crtc_state);
17654 		if (WARN_ON(put_domains))
17655 			modeset_put_power_domains(dev_priv, put_domains);
17656 	}
17657 
17658 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
17659 
17660 	intel_fbc_init_pipe_state(dev_priv);
17661 }
17662 
17663 void intel_display_resume(struct drm_device *dev)
17664 {
17665 	struct drm_i915_private *dev_priv = to_i915(dev);
17666 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
17667 	struct drm_modeset_acquire_ctx ctx;
17668 	int ret;
17669 
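	/* Consume the state that was stashed at suspend time exactly once */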
17670 	dev_priv->modeset_restore_state = NULL;
17671 	if (state)
17672 		state->acquire_ctx = &ctx;
17673 
17674 	drm_modeset_acquire_init(&ctx, 0);
17675 
17676 	while (1) {
17677 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
17678 		if (ret != -EDEADLK)
17679 			break;
17680 
17681 		drm_modeset_backoff(&ctx);
17682 	}
17683 
17684 	if (!ret)
17685 		ret = __intel_display_resume(dev, state, &ctx);
17686 
17687 	intel_enable_ipc(dev_priv);
17688 	drm_modeset_drop_locks(&ctx);
17689 	drm_modeset_acquire_fini(&ctx);
17690 
17691 	if (ret)
17692 		DRM_ERROR("Restoring old state failed with %i\n", ret);
17693 	if (state)
17694 		drm_atomic_state_put(state);
17695 }
17696 
17697 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
17698 {
17699 	struct intel_connector *connector;
17700 	struct drm_connector_list_iter conn_iter;
17701 
17702 	/* Kill all the work that may have been queued by hpd. */
17703 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
17704 	for_each_intel_connector_iter(connector, &conn_iter) {
17705 		if (connector->modeset_retry_work.func)
17706 			cancel_work_sync(&connector->modeset_retry_work);
17707 		if (connector->hdcp.shim) {
17708 			cancel_delayed_work_sync(&connector->hdcp.check_work);
17709 			cancel_work_sync(&connector->hdcp.prop_work);
17710 		}
17711 	}
17712 	drm_connector_list_iter_end(&conn_iter);
17713 }
17714 
17715 void intel_modeset_driver_remove(struct drm_i915_private *i915)
17716 {
17717 	flush_workqueue(i915->flip_wq);
17718 	flush_workqueue(i915->modeset_wq);
17719 
17720 	flush_work(&i915->atomic_helper.free_work);
17721 	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
17722 
17723 	/*
	 * Disable interrupts and polling first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
17726 	 * experience fancy races otherwise.
17727 	 */
17728 	intel_irq_uninstall(i915);
17729 
17730 	/*
17731 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
17732 	 * poll handlers. Hence disable polling after hpd handling is shut down.
17733 	 */
17734 	intel_hpd_poll_fini(i915);
17735 
17736 	/* poll work can call into fbdev, hence clean that up afterwards */
17737 	intel_fbdev_fini(i915);
17738 
17739 	intel_unregister_dsm_handler();
17740 
17741 	intel_fbc_global_disable(i915);
17742 
17743 	/* flush any delayed tasks or pending work */
17744 	flush_scheduled_work();
17745 
17746 	intel_hdcp_component_fini(i915);
17747 
17748 	drm_mode_config_cleanup(&i915->drm);
17749 
17750 	intel_overlay_cleanup(i915);
17751 
17752 	intel_gmbus_teardown(i915);
17753 
17754 	destroy_workqueue(i915->flip_wq);
17755 	destroy_workqueue(i915->modeset_wq);
17756 
17757 	intel_fbc_cleanup_cfb(i915);
17758 }
17759 
17760 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
17761 
17762 struct intel_display_error_state {
17763 
17764 	u32 power_well_driver;
17765 
17766 	struct intel_cursor_error_state {
17767 		u32 control;
17768 		u32 position;
17769 		u32 base;
17770 		u32 size;
17771 	} cursor[I915_MAX_PIPES];
17772 
17773 	struct intel_pipe_error_state {
17774 		bool power_domain_on;
17775 		u32 source;
17776 		u32 stat;
17777 	} pipe[I915_MAX_PIPES];
17778 
17779 	struct intel_plane_error_state {
17780 		u32 control;
17781 		u32 stride;
17782 		u32 size;
17783 		u32 pos;
17784 		u32 addr;
17785 		u32 surface;
17786 		u32 tile_offset;
17787 	} plane[I915_MAX_PIPES];
17788 
17789 	struct intel_transcoder_error_state {
17790 		bool available;
17791 		bool power_domain_on;
17792 		enum transcoder cpu_transcoder;
17793 
17794 		u32 conf;
17795 
17796 		u32 htotal;
17797 		u32 hblank;
17798 		u32 hsync;
17799 		u32 vtotal;
17800 		u32 vblank;
17801 		u32 vsync;
17802 	} transcoder[5];
17803 };
17804 
17805 struct intel_display_error_state *
17806 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
17807 {
17808 	struct intel_display_error_state *error;
17809 	int transcoders[] = {
17810 		TRANSCODER_A,
17811 		TRANSCODER_B,
17812 		TRANSCODER_C,
17813 		TRANSCODER_D,
17814 		TRANSCODER_EDP,
17815 	};
17816 	int i;
17817 
17818 	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
17819 
17820 	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
17821 		return NULL;
17822 
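	/* Error capture may run from atomic context, hence GFP_ATOMIC */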
17823 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
17824 	if (error == NULL)
17825 		return NULL;
17826 
17827 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17828 		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
17829 
17830 	for_each_pipe(dev_priv, i) {
17831 		error->pipe[i].power_domain_on =
17832 			__intel_display_power_is_enabled(dev_priv,
17833 							 POWER_DOMAIN_PIPE(i));
17834 		if (!error->pipe[i].power_domain_on)
17835 			continue;
17836 
17837 		error->cursor[i].control = I915_READ(CURCNTR(i));
17838 		error->cursor[i].position = I915_READ(CURPOS(i));
17839 		error->cursor[i].base = I915_READ(CURBASE(i));
17840 
17841 		error->plane[i].control = I915_READ(DSPCNTR(i));
17842 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
17843 		if (INTEL_GEN(dev_priv) <= 3) {
17844 			error->plane[i].size = I915_READ(DSPSIZE(i));
17845 			error->plane[i].pos = I915_READ(DSPPOS(i));
17846 		}
17847 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17848 			error->plane[i].addr = I915_READ(DSPADDR(i));
17849 		if (INTEL_GEN(dev_priv) >= 4) {
17850 			error->plane[i].surface = I915_READ(DSPSURF(i));
17851 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
17852 		}
17853 
17854 		error->pipe[i].source = I915_READ(PIPESRC(i));
17855 
17856 		if (HAS_GMCH(dev_priv))
17857 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
17858 	}
17859 
17860 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17861 		enum transcoder cpu_transcoder = transcoders[i];
17862 
17863 		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
17864 			continue;
17865 
17866 		error->transcoder[i].available = true;
17867 		error->transcoder[i].power_domain_on =
17868 			__intel_display_power_is_enabled(dev_priv,
17869 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
17870 		if (!error->transcoder[i].power_domain_on)
17871 			continue;
17872 
17873 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
17874 
17875 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
17876 		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
17877 		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
17878 		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
17879 		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
17880 		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
17881 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
17882 	}
17883 
17884 	return error;
17885 }
17886 
17887 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
17888 
17889 void
17890 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
17891 				struct intel_display_error_state *error)
17892 {
17893 	struct drm_i915_private *dev_priv = m->i915;
17894 	int i;
17895 
17896 	if (!error)
17897 		return;
17898 
17899 	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
17900 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
17901 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
17902 			   error->power_well_driver);
17903 	for_each_pipe(dev_priv, i) {
17904 		err_printf(m, "Pipe [%d]:\n", i);
17905 		err_printf(m, "  Power: %s\n",
17906 			   onoff(error->pipe[i].power_domain_on));
17907 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
17908 		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
17909 
17910 		err_printf(m, "Plane [%d]:\n", i);
17911 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
17912 		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
17913 		if (INTEL_GEN(dev_priv) <= 3) {
17914 			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
17915 			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
17916 		}
17917 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
17918 			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
17919 		if (INTEL_GEN(dev_priv) >= 4) {
17920 			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
17921 			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
17922 		}
17923 
17924 		err_printf(m, "Cursor [%d]:\n", i);
17925 		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
17926 		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
17927 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
17928 	}
17929 
17930 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17931 		if (!error->transcoder[i].available)
17932 			continue;
17933 
17934 		err_printf(m, "CPU transcoder: %s\n",
17935 			   transcoder_name(error->transcoder[i].cpu_transcoder));
17936 		err_printf(m, "  Power: %s\n",
17937 			   onoff(error->transcoder[i].power_domain_on));
17938 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
17939 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
17940 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
17941 		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
17942 		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
17943 		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
17944 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
17945 	}
17946 }
17947 
17948 #endif
17949