/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
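
/*
 * Worked example (illustrative numbers, checked against the
 * intel_limits_i8xx_dac table below): refclk = 48000 kHz, n = 2,
 * m1 = 18, m2 = 6, p1 = 2, p2 = 4 give
 * m = 5 * (18 + 2) + (6 + 2) = 108,
 * vco = 48000 * 108 / (2 + 2) = 1296000 kHz and
 * dot = 1296000 / (2 * 4) = 162000 kHz, all within the table's limits.
 */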

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}
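
/*
 * Example: a fuse value of 2 selects vco_freq[2], so the function
 * returns 2000 * 1000 = 2000000 kHz, i.e. a 2 GHz HPLL VCO.
 */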

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
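
/*
 * The returned rate follows from the register field: clock =
 * 2 * ref_freq / (divider + 1). Illustrative numbers: ref_freq =
 * 1600000 kHz and a divider field of 7 yield 2 * 1600000 / 8 =
 * 400000 kHz.
 */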

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of kHz, like both values returned below */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
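
/*
 * Worked Pineview example (illustrative numbers): m2 = 98 gives
 * m = 98 + 2 = 100; with refclk = 96000 kHz and n = 3,
 * vco = 96000 * 100 / 3 = 3200000 kHz, and p1 = 4, p2 = 10 (p = 40)
 * yield dot = 3200000 / 40 = 80000 kHz.
 */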

static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
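
/* E.g. m1 = 10, m2 = 8 give m = 5 * (10 + 2) + (8 + 2) = 70. */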

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
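
/*
 * On CHV m2 is a 22.22 fixed-point value, hence the (n << 22) divisor
 * above. Illustrative numbers: refclk = 100000 kHz, n = 1, m1 = 2 and
 * m2 = 27 << 22 give vco = 100000 * 2 * 27 = 5400000 kHz; with p = 12
 * the fast dot clock is 450000 kHz and the pipe is fed
 * 450000 / 5 = 90000 kHz.
 */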

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev_priv))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
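
/*
 * E.g. with intel_limits_i9xx_sdvo (dot_limit = 200000) a 148500 kHz
 * target selects p2_slow (10) while a 270000 kHz target selects
 * p2_fast (5).
 */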

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the Pineview clock
 * equation: refclk * (m2 + 2) / n / p1 / p2 (m1 is unused).
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00586, i.e. target * 3/512 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
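
/*
 * The err_most tolerance above works out to target * (1/256 + 1/512) =
 * target * 3/512, roughly 0.586%; e.g. a 270000 kHz target tolerates an
 * error of up to 1054 + 527 = 1581 kHz.
 */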

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Returns true if it is, with
 * the calculated error stored in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * m1 * m2 / n / p1 / p2, which runs at 5x the pipe clock.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* minimum update rate (refclk / n) is 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
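
/*
 * Note that unlike the solvers above, m2 is not iterated here: it is
 * solved directly from the clock equation as
 * m2 = target * p * n / (refclk * m1), rounded to the nearest integer.
 */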

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * m1 * m2 / n / p1 / p2 (with m2 in 22.22 fixed point), which
 * runs at 5x the pipe clock.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Per the hardware docs, n is always set to 1 and m1 is always
	 * set to 2. If we ever need to support a 200 MHz refclk, this
	 * needs to be revisited because n may no longer be 1.
	 */
	clock.n = 1;
	clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
						   refclk * clock.m1);

			if (m2 > INT_MAX / clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  crtc_state->port_clock, refclk,
				  NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}
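
/*
 * The check above relies on the scanline counter only advancing while
 * the pipe is running: two PIPEDSL samples taken 5 ms apart should
 * differ only while the pipe is actively scanning out.
 */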

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB is multiplexed with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Re-enable the 10-bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}

static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* i830 keeps both pipes (and their PLLs) running at all times */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * PCH transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the transcoder BPC consistent with the BPC in the
		 * pipeconf register. For HDMI we must use 8bpc here for
		 * both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1685 
1686 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1687 				      enum transcoder cpu_transcoder)
1688 {
1689 	u32 val, pipeconf_val;
1690 
1691 	/* FDI must be feeding us bits for PCH ports */
1692 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1693 	assert_fdi_rx_enabled(dev_priv, PIPE_A);
1694 
1695 	/* Workaround: set timing override bit. */
1696 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1697 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1698 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1699 
1700 	val = TRANS_ENABLE;
1701 	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1702 
1703 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1704 	    PIPECONF_INTERLACED_ILK)
1705 		val |= TRANS_INTERLACED;
1706 	else
1707 		val |= TRANS_PROGRESSIVE;
1708 
1709 	I915_WRITE(LPT_TRANSCONF, val);
1710 	if (intel_wait_for_register(&dev_priv->uncore,
1711 				    LPT_TRANSCONF,
1712 				    TRANS_STATE_ENABLE,
1713 				    TRANS_STATE_ENABLE,
1714 				    100))
1715 		DRM_ERROR("Failed to enable PCH transcoder\n");
1716 }
1717 
1718 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1719 					    enum pipe pipe)
1720 {
1721 	i915_reg_t reg;
1722 	u32 val;
1723 
1724 	/* FDI relies on the transcoder */
1725 	assert_fdi_tx_disabled(dev_priv, pipe);
1726 	assert_fdi_rx_disabled(dev_priv, pipe);
1727 
1728 	/* Ports must be off as well */
1729 	assert_pch_ports_disabled(dev_priv, pipe);
1730 
1731 	reg = PCH_TRANSCONF(pipe);
1732 	val = I915_READ(reg);
1733 	val &= ~TRANS_ENABLE;
1734 	I915_WRITE(reg, val);
1735 	/* wait for PCH transcoder off, transcoder state */
1736 	if (intel_wait_for_register(&dev_priv->uncore,
1737 				    reg, TRANS_STATE_ENABLE, 0,
1738 				    50))
1739 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1740 
1741 	if (HAS_PCH_CPT(dev_priv)) {
1742 		/* Workaround: Clear the timing override chicken bit again. */
1743 		reg = TRANS_CHICKEN2(pipe);
1744 		val = I915_READ(reg);
1745 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1746 		I915_WRITE(reg, val);
1747 	}
1748 }
1749 
1750 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1751 {
1752 	u32 val;
1753 
1754 	val = I915_READ(LPT_TRANSCONF);
1755 	val &= ~TRANS_ENABLE;
1756 	I915_WRITE(LPT_TRANSCONF, val);
1757 	/* wait for PCH transcoder off, transcoder state */
1758 	if (intel_wait_for_register(&dev_priv->uncore,
1759 				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1760 				    50))
1761 		DRM_ERROR("Failed to disable PCH transcoder\n");
1762 
1763 	/* Workaround: clear timing override bit. */
1764 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1765 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1766 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1767 }
1768 
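/*
 * Return the PCH transcoder this CRTC uses: always transcoder A on LPT,
 * otherwise the one matching the CRTC's pipe.
 */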
1769 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1770 {
1771 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1772 
1773 	if (HAS_PCH_LPT(dev_priv))
1774 		return PIPE_A;
1775 	else
1776 		return crtc->pipe;
1777 }
1778 
1779 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1780 {
1781 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1782 
1783 	/*
1784 	 * On i965gm the hardware frame counter reads
1785 	 * zero when the TV encoder is enabled :(
1786 	 */
1787 	if (IS_I965GM(dev_priv) &&
1788 	    (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1789 		return 0;
1790 
1791 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1792 		return 0xffffffff; /* full 32 bit counter */
1793 	else if (INTEL_GEN(dev_priv) >= 3)
1794 		return 0xffffff; /* only 24 bits of frame count */
1795 	else
1796 		return 0; /* Gen2 doesn't have a hardware frame counter */
1797 }
1798 
1799 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1800 {
1801 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1802 
1803 	drm_crtc_set_max_vblank_count(&crtc->base,
1804 				      intel_crtc_max_vblank_count(crtc_state));
1805 	drm_crtc_vblank_on(&crtc->base);
1806 }
1807 
1808 static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
1809 {
1810 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1811 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1812 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
1813 	enum pipe pipe = crtc->pipe;
1814 	i915_reg_t reg;
1815 	u32 val;
1816 
1817 	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1818 
1819 	assert_planes_disabled(crtc);
1820 
1821 	/*
1822 	 * A pipe without a PLL won't actually be able to drive bits from
1823 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1824 	 * need the check.
1825 	 */
1826 	if (HAS_GMCH(dev_priv)) {
1827 		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
1828 			assert_dsi_pll_enabled(dev_priv);
1829 		else
1830 			assert_pll_enabled(dev_priv, pipe);
1831 	} else {
1832 		if (new_crtc_state->has_pch_encoder) {
1833 			/* if driving the PCH, we need FDI enabled */
1834 			assert_fdi_rx_pll_enabled(dev_priv,
1835 						  intel_crtc_pch_transcoder(crtc));
1836 			assert_fdi_tx_pll_enabled(dev_priv,
1837 						  (enum pipe) cpu_transcoder);
1838 		}
1839 		/* FIXME: assert CPU port conditions for SNB+ */
1840 	}
1841 
1842 	trace_intel_pipe_enable(dev_priv, pipe);
1843 
1844 	reg = PIPECONF(cpu_transcoder);
1845 	val = I915_READ(reg);
1846 	if (val & PIPECONF_ENABLE) {
1847 		/* we keep both pipes enabled on 830 */
1848 		WARN_ON(!IS_I830(dev_priv));
1849 		return;
1850 	}
1851 
1852 	I915_WRITE(reg, val | PIPECONF_ENABLE);
1853 	POSTING_READ(reg);
1854 
1855 	/*
1856 	 * Until the pipe starts PIPEDSL reads will return a stale value,
1857 	 * which causes an apparent vblank timestamp jump when PIPEDSL
1858 	 * resets to its proper value. That also messes up the frame count
1859 	 * when it's derived from the timestamps. So let's wait for the
1860 	 * pipe to start properly before we call drm_crtc_vblank_on()
1861 	 */
1862 	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
1863 		intel_wait_for_pipe_scanline_moving(crtc);
1864 }
1865 
1866 static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
1867 {
1868 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
1869 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1870 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
1871 	enum pipe pipe = crtc->pipe;
1872 	i915_reg_t reg;
1873 	u32 val;
1874 
1875 	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1876 
1877 	/*
1878 	 * Make sure planes won't keep trying to pump pixels to us,
1879 	 * or we might hang the display.
1880 	 */
1881 	assert_planes_disabled(crtc);
1882 
1883 	trace_intel_pipe_disable(dev_priv, pipe);
1884 
1885 	reg = PIPECONF(cpu_transcoder);
1886 	val = I915_READ(reg);
1887 	if ((val & PIPECONF_ENABLE) == 0)
1888 		return;
1889 
1890 	/*
1891 	 * Double wide has implications for planes
1892 	 * so best keep it disabled when not needed.
1893 	 */
1894 	if (old_crtc_state->double_wide)
1895 		val &= ~PIPECONF_DOUBLE_WIDE;
1896 
	/* i830 needs both pipes kept running, so don't clear the enable bit there */
1898 	if (!IS_I830(dev_priv))
1899 		val &= ~PIPECONF_ENABLE;
1900 
1901 	I915_WRITE(reg, val);
1902 	if ((val & PIPECONF_ENABLE) == 0)
1903 		intel_wait_for_pipe_off(old_crtc_state);
1904 }
1905 
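/* GTT tile size in bytes: 2KiB tiles on gen2, 4KiB everywhere else. */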
1906 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1907 {
1908 	return IS_GEN(dev_priv, 2) ? 2048 : 4096;
1909 }
1910 
1911 static unsigned int
1912 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
1913 {
1914 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1915 	unsigned int cpp = fb->format->cpp[color_plane];
1916 
1917 	switch (fb->modifier) {
1918 	case DRM_FORMAT_MOD_LINEAR:
1919 		return intel_tile_size(dev_priv);
1920 	case I915_FORMAT_MOD_X_TILED:
1921 		if (IS_GEN(dev_priv, 2))
1922 			return 128;
1923 		else
1924 			return 512;
1925 	case I915_FORMAT_MOD_Y_TILED_CCS:
1926 		if (color_plane == 1)
1927 			return 128;
1928 		/* fall through */
1929 	case I915_FORMAT_MOD_Y_TILED:
1930 		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
1931 			return 128;
1932 		else
1933 			return 512;
1934 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1935 		if (color_plane == 1)
1936 			return 128;
1937 		/* fall through */
1938 	case I915_FORMAT_MOD_Yf_TILED:
1939 		switch (cpp) {
1940 		case 1:
1941 			return 64;
1942 		case 2:
1943 		case 4:
1944 			return 128;
1945 		case 8:
1946 		case 16:
1947 			return 256;
1948 		default:
1949 			MISSING_CASE(cpp);
1950 			return cpp;
1951 		}
1952 		break;
1953 	default:
1954 		MISSING_CASE(fb->modifier);
1955 		return cpp;
1956 	}
1957 }
1958 
1959 static unsigned int
1960 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1961 {
1962 	return intel_tile_size(to_i915(fb->dev)) /
1963 		intel_tile_width_bytes(fb, color_plane);
1964 }
1965 
1966 /* Return the tile dimensions in pixel units */
1967 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1968 			    unsigned int *tile_width,
1969 			    unsigned int *tile_height)
1970 {
1971 	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1972 	unsigned int cpp = fb->format->cpp[color_plane];
1973 
1974 	*tile_width = tile_width_bytes / cpp;
1975 	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1976 }
1977 
1978 unsigned int
1979 intel_fb_align_height(const struct drm_framebuffer *fb,
1980 		      int color_plane, unsigned int height)
1981 {
1982 	unsigned int tile_height = intel_tile_height(fb, color_plane);
1983 
1984 	return ALIGN(height, tile_height);
1985 }
1986 
1987 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1988 {
1989 	unsigned int size = 0;
1990 	int i;
1991 
	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
1993 		size += rot_info->plane[i].width * rot_info->plane[i].height;
1994 
1995 	return size;
1996 }
1997 
1998 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1999 {
2000 	unsigned int size = 0;
2001 	int i;
2002 
	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
2004 		size += rem_info->plane[i].width * rem_info->plane[i].height;
2005 
2006 	return size;
2007 }
2008 
2009 static void
2010 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2011 			const struct drm_framebuffer *fb,
2012 			unsigned int rotation)
2013 {
2014 	view->type = I915_GGTT_VIEW_NORMAL;
2015 	if (drm_rotation_90_or_270(rotation)) {
2016 		view->type = I915_GGTT_VIEW_ROTATED;
2017 		view->rotated = to_intel_framebuffer(fb)->rot_info;
2018 	}
2019 }
2020 
2021 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2022 {
2023 	if (IS_I830(dev_priv))
2024 		return 16 * 1024;
2025 	else if (IS_I85X(dev_priv))
2026 		return 256;
2027 	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2028 		return 32;
2029 	else
2030 		return 4 * 1024;
2031 }
2032 
2033 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2034 {
2035 	if (INTEL_GEN(dev_priv) >= 9)
2036 		return 256 * 1024;
2037 	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
2038 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2039 		return 128 * 1024;
2040 	else if (INTEL_GEN(dev_priv) >= 4)
2041 		return 4 * 1024;
2042 	else
2043 		return 0;
2044 }
2045 
2046 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2047 					 int color_plane)
2048 {
2049 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2050 
2051 	/* AUX_DIST needs only 4K alignment */
2052 	if (color_plane == 1)
2053 		return 4096;
2054 
2055 	switch (fb->modifier) {
2056 	case DRM_FORMAT_MOD_LINEAR:
2057 		return intel_linear_alignment(dev_priv);
2058 	case I915_FORMAT_MOD_X_TILED:
2059 		if (INTEL_GEN(dev_priv) >= 9)
2060 			return 256 * 1024;
2061 		return 0;
2062 	case I915_FORMAT_MOD_Y_TILED_CCS:
2063 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2064 	case I915_FORMAT_MOD_Y_TILED:
2065 	case I915_FORMAT_MOD_Yf_TILED:
2066 		return 1 * 1024 * 1024;
2067 	default:
2068 		MISSING_CASE(fb->modifier);
2069 		return 0;
2070 	}
2071 }
2072 
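/*
 * Pre-gen4 always scans out through a fence; on gen4+ a fence is only
 * needed when FBC may compress the plane and it uses the normal GTT view.
 */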
2073 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2074 {
2075 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2076 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2077 
2078 	return INTEL_GEN(dev_priv) < 4 ||
2079 		(plane->has_fbc &&
2080 		 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2081 }
2082 
2083 struct i915_vma *
2084 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2085 			   const struct i915_ggtt_view *view,
2086 			   bool uses_fence,
2087 			   unsigned long *out_flags)
2088 {
2089 	struct drm_device *dev = fb->dev;
2090 	struct drm_i915_private *dev_priv = to_i915(dev);
2091 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2092 	intel_wakeref_t wakeref;
2093 	struct i915_vma *vma;
2094 	unsigned int pinctl;
2095 	u32 alignment;
2096 
2097 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2098 
2099 	alignment = intel_surf_alignment(fb, 0);
2100 
	/*
	 * Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout, preventing
	 * the VT-d warning.
	 */
2106 	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2107 		alignment = 256 * 1024;
2108 
2109 	/*
2110 	 * Global gtt pte registers are special registers which actually forward
2111 	 * writes to a chunk of system memory. Which means that there is no risk
2112 	 * that the register values disappear as soon as we call
2113 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2114 	 * pin/unpin/fence and not more.
2115 	 */
2116 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2117 	i915_gem_object_lock(obj);
2118 
2119 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2120 
2121 	pinctl = 0;
2122 
	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scan out from anywhere within its global aperture.
	 */
2130 	if (HAS_GMCH(dev_priv))
2131 		pinctl |= PIN_MAPPABLE;
2132 
2133 	vma = i915_gem_object_pin_to_display_plane(obj,
2134 						   alignment, view, pinctl);
2135 	if (IS_ERR(vma))
2136 		goto err;
2137 
2138 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
2139 		int ret;
2140 
		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always install
		 * a fence when possible, as the cost is not that onerous.
2145 		 *
2146 		 * If we fail to fence the tiled scanout, then either the
2147 		 * modeset will reject the change (which is highly unlikely as
2148 		 * the affected systems, all but one, do not have unmappable
2149 		 * space) or we will not be able to enable full powersaving
2150 		 * techniques (also likely not to apply due to various limits
2151 		 * FBC and the like impose on the size of the buffer, which
2152 		 * presumably we violated anyway with this unmappable buffer).
2153 		 * Anyway, it is presumably better to stumble onwards with
2154 		 * something and try to run the system in a "less than optimal"
2155 		 * mode that matches the user configuration.
2156 		 */
2157 		ret = i915_vma_pin_fence(vma);
2158 		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
2159 			i915_gem_object_unpin_from_display_plane(vma);
2160 			vma = ERR_PTR(ret);
2161 			goto err;
2162 		}
2163 
2164 		if (ret == 0 && vma->fence)
2165 			*out_flags |= PLANE_HAS_FENCE;
2166 	}
2167 
2168 	i915_vma_get(vma);
2169 err:
2170 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2171 
2172 	i915_gem_object_unlock(obj);
2173 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2174 	return vma;
2175 }
2176 
2177 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
2178 {
2179 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
2180 
2181 	i915_gem_object_lock(vma->obj);
2182 	if (flags & PLANE_HAS_FENCE)
2183 		i915_vma_unpin_fence(vma);
2184 	i915_gem_object_unpin_from_display_plane(vma);
2185 	i915_gem_object_unlock(vma->obj);
2186 
2187 	i915_vma_put(vma);
2188 }
2189 
2190 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2191 			  unsigned int rotation)
2192 {
2193 	if (drm_rotation_90_or_270(rotation))
2194 		return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2195 	else
2196 		return fb->pitches[color_plane];
2197 }
2198 
2199 /*
2200  * Convert the x/y offsets into a linear offset.
2201  * Only valid with 0/180 degree rotation, which is fine since linear
2202  * offset is only used with linear buffers on pre-hsw and tiled buffers
2203  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2204  */
2205 u32 intel_fb_xy_to_linear(int x, int y,
2206 			  const struct intel_plane_state *state,
2207 			  int color_plane)
2208 {
2209 	const struct drm_framebuffer *fb = state->base.fb;
2210 	unsigned int cpp = fb->format->cpp[color_plane];
2211 	unsigned int pitch = state->color_plane[color_plane].stride;
2212 
2213 	return y * pitch + x * cpp;
2214 }
2215 
2216 /*
2217  * Add the x/y offsets derived from fb->offsets[] to the user
2218  * specified plane src x/y offsets. The resulting x/y offsets
2219  * specify the start of scanout from the beginning of the gtt mapping.
2220  */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
2226 	*x += state->color_plane[color_plane].x;
2227 	*y += state->color_plane[color_plane].y;
2228 }
2229 
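/*
 * Fold the tile-aligned delta (old_offset - new_offset) into the x/y
 * offsets: e.g. with pitch_tiles = 2, a delta of 3 tiles adds one
 * tile_height to y (3 / 2) and one tile_width to x (3 % 2).
 */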
2230 static u32 intel_adjust_tile_offset(int *x, int *y,
2231 				    unsigned int tile_width,
2232 				    unsigned int tile_height,
2233 				    unsigned int tile_size,
2234 				    unsigned int pitch_tiles,
2235 				    u32 old_offset,
2236 				    u32 new_offset)
2237 {
2238 	unsigned int pitch_pixels = pitch_tiles * tile_width;
2239 	unsigned int tiles;
2240 
2241 	WARN_ON(old_offset & (tile_size - 1));
2242 	WARN_ON(new_offset & (tile_size - 1));
2243 	WARN_ON(new_offset > old_offset);
2244 
2245 	tiles = (old_offset - new_offset) / tile_size;
2246 
2247 	*y += tiles / pitch_tiles * tile_height;
2248 	*x += tiles % pitch_tiles * tile_width;
2249 
2250 	/* minimize x in case it got needlessly big */
2251 	*y += *x / pitch_pixels * tile_height;
2252 	*x %= pitch_pixels;
2253 
2254 	return new_offset;
2255 }
2256 
2257 static bool is_surface_linear(u64 modifier, int color_plane)
2258 {
2259 	return modifier == DRM_FORMAT_MOD_LINEAR;
2260 }
2261 
2262 static u32 intel_adjust_aligned_offset(int *x, int *y,
2263 				       const struct drm_framebuffer *fb,
2264 				       int color_plane,
2265 				       unsigned int rotation,
2266 				       unsigned int pitch,
2267 				       u32 old_offset, u32 new_offset)
2268 {
2269 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2270 	unsigned int cpp = fb->format->cpp[color_plane];
2271 
2272 	WARN_ON(new_offset > old_offset);
2273 
2274 	if (!is_surface_linear(fb->modifier, color_plane)) {
2275 		unsigned int tile_size, tile_width, tile_height;
2276 		unsigned int pitch_tiles;
2277 
2278 		tile_size = intel_tile_size(dev_priv);
2279 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2280 
2281 		if (drm_rotation_90_or_270(rotation)) {
2282 			pitch_tiles = pitch / tile_height;
2283 			swap(tile_width, tile_height);
2284 		} else {
2285 			pitch_tiles = pitch / (tile_width * cpp);
2286 		}
2287 
2288 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2289 					 tile_size, pitch_tiles,
2290 					 old_offset, new_offset);
2291 	} else {
2292 		old_offset += *y * pitch + *x * cpp;
2293 
2294 		*y = (old_offset - new_offset) / pitch;
2295 		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
2296 	}
2297 
2298 	return new_offset;
2299 }
2300 
2301 /*
2302  * Adjust the tile offset by moving the difference into
2303  * the x/y offsets.
2304  */
2305 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2306 					     const struct intel_plane_state *state,
2307 					     int color_plane,
2308 					     u32 old_offset, u32 new_offset)
2309 {
2310 	return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2311 					   state->base.rotation,
2312 					   state->color_plane[color_plane].stride,
2313 					   old_offset, new_offset);
2314 }
2315 
2316 /*
2317  * Computes the aligned offset to the base tile and adjusts
2318  * x, y. bytes per pixel is assumed to be a power-of-two.
2319  *
2320  * In the 90/270 rotated case, x and y are assumed
2321  * to be already rotated to match the rotated GTT view, and
2322  * pitch is the tile_height aligned framebuffer height.
2323  *
2324  * This function is used when computing the derived information
2325  * under intel_framebuffer, so using any of that information
2326  * here is not allowed. Anything under drm_framebuffer can be
2327  * used. This is why the user has to pass in the pitch since it
2328  * is specified in the rotated orientation.
2329  */
2330 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2331 					int *x, int *y,
2332 					const struct drm_framebuffer *fb,
2333 					int color_plane,
2334 					unsigned int pitch,
2335 					unsigned int rotation,
2336 					u32 alignment)
2337 {
2338 	unsigned int cpp = fb->format->cpp[color_plane];
2339 	u32 offset, offset_aligned;
2340 
2341 	if (alignment)
2342 		alignment--;
2343 
2344 	if (!is_surface_linear(fb->modifier, color_plane)) {
2345 		unsigned int tile_size, tile_width, tile_height;
2346 		unsigned int tile_rows, tiles, pitch_tiles;
2347 
2348 		tile_size = intel_tile_size(dev_priv);
2349 		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2350 
2351 		if (drm_rotation_90_or_270(rotation)) {
2352 			pitch_tiles = pitch / tile_height;
2353 			swap(tile_width, tile_height);
2354 		} else {
2355 			pitch_tiles = pitch / (tile_width * cpp);
2356 		}
2357 
2358 		tile_rows = *y / tile_height;
2359 		*y %= tile_height;
2360 
2361 		tiles = *x / tile_width;
2362 		*x %= tile_width;
2363 
2364 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2365 		offset_aligned = offset & ~alignment;
2366 
2367 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2368 					 tile_size, pitch_tiles,
2369 					 offset, offset_aligned);
2370 	} else {
2371 		offset = *y * pitch + *x * cpp;
2372 		offset_aligned = offset & ~alignment;
2373 
2374 		*y = (offset & alignment) / pitch;
2375 		*x = ((offset & alignment) - *y * pitch) / cpp;
2376 	}
2377 
2378 	return offset_aligned;
2379 }
2380 
2381 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2382 					      const struct intel_plane_state *state,
2383 					      int color_plane)
2384 {
2385 	struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2386 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2387 	const struct drm_framebuffer *fb = state->base.fb;
2388 	unsigned int rotation = state->base.rotation;
2389 	int pitch = state->color_plane[color_plane].stride;
2390 	u32 alignment;
2391 
2392 	if (intel_plane->id == PLANE_CURSOR)
2393 		alignment = intel_cursor_alignment(dev_priv);
2394 	else
2395 		alignment = intel_surf_alignment(fb, color_plane);
2396 
2397 	return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2398 					    pitch, rotation, alignment);
2399 }
2400 
2401 /* Convert the fb->offset[] into x/y offsets */
2402 static int intel_fb_offset_to_xy(int *x, int *y,
2403 				 const struct drm_framebuffer *fb,
2404 				 int color_plane)
2405 {
2406 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2407 	unsigned int height;
2408 
2409 	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2410 	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2411 		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2412 			      fb->offsets[color_plane], color_plane);
2413 		return -EINVAL;
2414 	}
2415 
2416 	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2417 	height = ALIGN(height, intel_tile_height(fb, color_plane));
2418 
2419 	/* Catch potential overflows early */
2420 	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2421 			    fb->offsets[color_plane])) {
2422 		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2423 			      fb->offsets[color_plane], fb->pitches[color_plane],
2424 			      color_plane);
2425 		return -ERANGE;
2426 	}
2427 
2428 	*x = 0;
2429 	*y = 0;
2430 
2431 	intel_adjust_aligned_offset(x, y,
2432 				    fb, color_plane, DRM_MODE_ROTATE_0,
2433 				    fb->pitches[color_plane],
2434 				    fb->offsets[color_plane], 0);
2435 
2436 	return 0;
2437 }
2438 
2439 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2440 {
2441 	switch (fb_modifier) {
2442 	case I915_FORMAT_MOD_X_TILED:
2443 		return I915_TILING_X;
2444 	case I915_FORMAT_MOD_Y_TILED:
2445 	case I915_FORMAT_MOD_Y_TILED_CCS:
2446 		return I915_TILING_Y;
2447 	default:
2448 		return I915_TILING_NONE;
2449 	}
2450 }
2451 
2452 /*
2453  * From the Sky Lake PRM:
2454  * "The Color Control Surface (CCS) contains the compression status of
2455  *  the cache-line pairs. The compression state of the cache-line pair
2456  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2457  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2458  *  cache-line-pairs. CCS is always Y tiled."
2459  *
2460  * Since cache line pairs refers to horizontally adjacent cache lines,
2461  * each cache line in the CCS corresponds to an area of 32x16 cache
2462  * lines on the main surface. Since each pixel is 4 bytes, this gives
2463  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2464  * main surface.
2465  */
2466 static const struct drm_format_info ccs_formats[] = {
2467 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2468 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2469 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2470 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2471 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2472 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2473 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2474 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2475 };
2476 
2477 static const struct drm_format_info *
2478 lookup_format_info(const struct drm_format_info formats[],
2479 		   int num_formats, u32 format)
2480 {
2481 	int i;
2482 
2483 	for (i = 0; i < num_formats; i++) {
2484 		if (formats[i].format == format)
2485 			return &formats[i];
2486 	}
2487 
2488 	return NULL;
2489 }
2490 
2491 static const struct drm_format_info *
2492 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2493 {
2494 	switch (cmd->modifier[0]) {
2495 	case I915_FORMAT_MOD_Y_TILED_CCS:
2496 	case I915_FORMAT_MOD_Yf_TILED_CCS:
2497 		return lookup_format_info(ccs_formats,
2498 					  ARRAY_SIZE(ccs_formats),
2499 					  cmd->pixel_format);
2500 	default:
2501 		return NULL;
2502 	}
2503 }
2504 
2505 bool is_ccs_modifier(u64 modifier)
2506 {
2507 	return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2508 	       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2509 }
2510 
2511 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2512 			      u32 pixel_format, u64 modifier)
2513 {
2514 	struct intel_crtc *crtc;
2515 	struct intel_plane *plane;
2516 
2517 	/*
2518 	 * We assume the primary plane for pipe A has
2519 	 * the highest stride limits of them all.
2520 	 */
2521 	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2522 	plane = to_intel_plane(crtc->base.primary);
2523 
2524 	return plane->max_stride(plane, pixel_format, modifier,
2525 				 DRM_MODE_ROTATE_0);
2526 }
2527 
static u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
			       u32 pixel_format, u64 modifier)
{
	/*
	 * Arbitrary limit for gen4+ chosen to match the
	 * render engine max stride.
	 *
	 * The new CCS hash mode makes remapping impossible.
	 */
	if (!is_ccs_modifier(modifier)) {
		if (INTEL_GEN(dev_priv) >= 7)
			return 256 * 1024;
		else if (INTEL_GEN(dev_priv) >= 4)
			return 128 * 1024;
	}
2544 
2545 	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2546 }
2547 
2548 static u32
2549 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2550 {
2551 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
2552 
2553 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2554 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2555 							   fb->format->format,
2556 							   fb->modifier);
2557 
2558 		/*
2559 		 * To make remapping with linear generally feasible
2560 		 * we need the stride to be page aligned.
2561 		 */
2562 		if (fb->pitches[color_plane] > max_stride)
2563 			return intel_tile_size(dev_priv);
2564 		else
2565 			return 64;
2566 	} else {
2567 		return intel_tile_width_bytes(fb, color_plane);
2568 	}
2569 }
2570 
2571 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2572 {
2573 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2574 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2575 	const struct drm_framebuffer *fb = plane_state->base.fb;
2576 	int i;
2577 
2578 	/* We don't want to deal with remapping with cursors */
2579 	if (plane->id == PLANE_CURSOR)
2580 		return false;
2581 
2582 	/*
2583 	 * The display engine limits already match/exceed the
2584 	 * render engine limits, so not much point in remapping.
2585 	 * Would also need to deal with the fence POT alignment
2586 	 * and gen2 2KiB GTT tile size.
2587 	 */
2588 	if (INTEL_GEN(dev_priv) < 4)
2589 		return false;
2590 
2591 	/*
2592 	 * The new CCS hash mode isn't compatible with remapping as
2593 	 * the virtual address of the pages affects the compressed data.
2594 	 */
2595 	if (is_ccs_modifier(fb->modifier))
2596 		return false;
2597 
2598 	/* Linear needs a page aligned stride for remapping */
2599 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2600 		unsigned int alignment = intel_tile_size(dev_priv) - 1;
2601 
2602 		for (i = 0; i < fb->format->num_planes; i++) {
2603 			if (fb->pitches[i] & alignment)
2604 				return false;
2605 		}
2606 	}
2607 
2608 	return true;
2609 }
2610 
2611 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2612 {
2613 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2614 	const struct drm_framebuffer *fb = plane_state->base.fb;
2615 	unsigned int rotation = plane_state->base.rotation;
2616 	u32 stride, max_stride;
2617 
2618 	/*
2619 	 * No remapping for invisible planes since we don't have
2620 	 * an actual source viewport to remap.
2621 	 */
2622 	if (!plane_state->base.visible)
2623 		return false;
2624 
2625 	if (!intel_plane_can_remap(plane_state))
2626 		return false;
2627 
2628 	/*
2629 	 * FIXME: aux plane limits on gen9+ are
2630 	 * unclear in Bspec, for now no checking.
2631 	 */
2632 	stride = intel_fb_pitch(fb, 0, rotation);
2633 	max_stride = plane->max_stride(plane, fb->format->format,
2634 				       fb->modifier, rotation);
2635 
2636 	return stride > max_stride;
2637 }
2638 
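/*
 * Precompute the normal and rotated view layout (x/y offsets, rotated
 * pitches, GTT tile counts) for each color plane of the fb, and check
 * that the resulting layout fits in the backing object.
 */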
2639 static int
2640 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2641 		   struct drm_framebuffer *fb)
2642 {
2643 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2644 	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2645 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2646 	u32 gtt_offset_rotated = 0;
2647 	unsigned int max_size = 0;
2648 	int i, num_planes = fb->format->num_planes;
2649 	unsigned int tile_size = intel_tile_size(dev_priv);
2650 
2651 	for (i = 0; i < num_planes; i++) {
2652 		unsigned int width, height;
2653 		unsigned int cpp, size;
2654 		u32 offset;
2655 		int x, y;
2656 		int ret;
2657 
2658 		cpp = fb->format->cpp[i];
2659 		width = drm_framebuffer_plane_width(fb->width, fb, i);
2660 		height = drm_framebuffer_plane_height(fb->height, fb, i);
2661 
2662 		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2663 		if (ret) {
2664 			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2665 				      i, fb->offsets[i]);
2666 			return ret;
2667 		}
2668 
2669 		if (is_ccs_modifier(fb->modifier) && i == 1) {
2670 			int hsub = fb->format->hsub;
2671 			int vsub = fb->format->vsub;
2672 			int tile_width, tile_height;
2673 			int main_x, main_y;
2674 			int ccs_x, ccs_y;
2675 
2676 			intel_tile_dims(fb, i, &tile_width, &tile_height);
2677 			tile_width *= hsub;
2678 			tile_height *= vsub;
2679 
2680 			ccs_x = (x * hsub) % tile_width;
2681 			ccs_y = (y * vsub) % tile_height;
2682 			main_x = intel_fb->normal[0].x % tile_width;
2683 			main_y = intel_fb->normal[0].y % tile_height;
2684 
2685 			/*
2686 			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2687 			 * x/y offsets must match between CCS and the main surface.
2688 			 */
2689 			if (main_x != ccs_x || main_y != ccs_y) {
2690 				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2691 					      main_x, main_y,
2692 					      ccs_x, ccs_y,
2693 					      intel_fb->normal[0].x,
2694 					      intel_fb->normal[0].y,
2695 					      x, y);
2696 				return -EINVAL;
2697 			}
2698 		}
2699 
2700 		/*
2701 		 * The fence (if used) is aligned to the start of the object
2702 		 * so having the framebuffer wrap around across the edge of the
2703 		 * fenced region doesn't really work. We have no API to configure
2704 		 * the fence start offset within the object (nor could we probably
2705 		 * on gen2/3). So it's just easier if we just require that the
2706 		 * fb layout agrees with the fence layout. We already check that the
2707 		 * fb stride matches the fence stride elsewhere.
2708 		 */
2709 		if (i == 0 && i915_gem_object_is_tiled(obj) &&
2710 		    (x + width) * cpp > fb->pitches[i]) {
2711 			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2712 				      i, fb->offsets[i]);
2713 			return -EINVAL;
2714 		}
2715 
2716 		/*
2717 		 * First pixel of the framebuffer from
2718 		 * the start of the normal gtt mapping.
2719 		 */
2720 		intel_fb->normal[i].x = x;
2721 		intel_fb->normal[i].y = y;
2722 
2723 		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2724 						      fb->pitches[i],
2725 						      DRM_MODE_ROTATE_0,
2726 						      tile_size);
2727 		offset /= tile_size;
2728 
2729 		if (!is_surface_linear(fb->modifier, i)) {
2730 			unsigned int tile_width, tile_height;
2731 			unsigned int pitch_tiles;
2732 			struct drm_rect r;
2733 
2734 			intel_tile_dims(fb, i, &tile_width, &tile_height);
2735 
2736 			rot_info->plane[i].offset = offset;
2737 			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2738 			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2739 			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2740 
2741 			intel_fb->rotated[i].pitch =
2742 				rot_info->plane[i].height * tile_height;
2743 
2744 			/* how many tiles does this plane need */
2745 			size = rot_info->plane[i].stride * rot_info->plane[i].height;
2746 			/*
2747 			 * If the plane isn't horizontally tile aligned,
2748 			 * we need one more tile.
2749 			 */
2750 			if (x != 0)
2751 				size++;
2752 
2753 			/* rotate the x/y offsets to match the GTT view */
2754 			r.x1 = x;
2755 			r.y1 = y;
2756 			r.x2 = x + width;
2757 			r.y2 = y + height;
2758 			drm_rect_rotate(&r,
2759 					rot_info->plane[i].width * tile_width,
2760 					rot_info->plane[i].height * tile_height,
2761 					DRM_MODE_ROTATE_270);
2762 			x = r.x1;
2763 			y = r.y1;
2764 
2765 			/* rotate the tile dimensions to match the GTT view */
2766 			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2767 			swap(tile_width, tile_height);
2768 
2769 			/*
2770 			 * We only keep the x/y offsets, so push all of the
2771 			 * gtt offset into the x/y offsets.
2772 			 */
2773 			intel_adjust_tile_offset(&x, &y,
2774 						 tile_width, tile_height,
2775 						 tile_size, pitch_tiles,
2776 						 gtt_offset_rotated * tile_size, 0);
2777 
2778 			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2779 
2780 			/*
2781 			 * First pixel of the framebuffer from
2782 			 * the start of the rotated gtt mapping.
2783 			 */
2784 			intel_fb->rotated[i].x = x;
2785 			intel_fb->rotated[i].y = y;
2786 		} else {
2787 			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2788 					    x * cpp, tile_size);
2789 		}
2790 
2791 		/* how many tiles in total needed in the bo */
2792 		max_size = max(max_size, offset + size);
2793 	}
2794 
2795 	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2796 		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2797 			      mul_u32_u32(max_size, tile_size), obj->base.size);
2798 		return -EINVAL;
2799 	}
2800 
2801 	return 0;
2802 }
2803 
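/*
 * Build a remapped/rotated GTT view for the plane: translate the src rect
 * to be viewport relative, then lay out each color plane tile by tile so
 * the resulting view stride stays within the display engine limits even
 * when the fb stride itself exceeds them.
 */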
2804 static void
2805 intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2806 {
2807 	struct drm_i915_private *dev_priv =
2808 		to_i915(plane_state->base.plane->dev);
2809 	struct drm_framebuffer *fb = plane_state->base.fb;
2810 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2811 	struct intel_rotation_info *info = &plane_state->view.rotated;
2812 	unsigned int rotation = plane_state->base.rotation;
2813 	int i, num_planes = fb->format->num_planes;
2814 	unsigned int tile_size = intel_tile_size(dev_priv);
2815 	unsigned int src_x, src_y;
2816 	unsigned int src_w, src_h;
2817 	u32 gtt_offset = 0;
2818 
2819 	memset(&plane_state->view, 0, sizeof(plane_state->view));
2820 	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2821 		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2822 
2823 	src_x = plane_state->base.src.x1 >> 16;
2824 	src_y = plane_state->base.src.y1 >> 16;
2825 	src_w = drm_rect_width(&plane_state->base.src) >> 16;
2826 	src_h = drm_rect_height(&plane_state->base.src) >> 16;
2827 
2828 	WARN_ON(is_ccs_modifier(fb->modifier));
2829 
2830 	/* Make src coordinates relative to the viewport */
2831 	drm_rect_translate(&plane_state->base.src,
2832 			   -(src_x << 16), -(src_y << 16));
2833 
2834 	/* Rotate src coordinates to match rotated GTT view */
2835 	if (drm_rotation_90_or_270(rotation))
2836 		drm_rect_rotate(&plane_state->base.src,
2837 				src_w << 16, src_h << 16,
2838 				DRM_MODE_ROTATE_270);
2839 
2840 	for (i = 0; i < num_planes; i++) {
2841 		unsigned int hsub = i ? fb->format->hsub : 1;
2842 		unsigned int vsub = i ? fb->format->vsub : 1;
2843 		unsigned int cpp = fb->format->cpp[i];
2844 		unsigned int tile_width, tile_height;
2845 		unsigned int width, height;
2846 		unsigned int pitch_tiles;
2847 		unsigned int x, y;
2848 		u32 offset;
2849 
2850 		intel_tile_dims(fb, i, &tile_width, &tile_height);
2851 
2852 		x = src_x / hsub;
2853 		y = src_y / vsub;
2854 		width = src_w / hsub;
2855 		height = src_h / vsub;
2856 
2857 		/*
2858 		 * First pixel of the src viewport from the
2859 		 * start of the normal gtt mapping.
2860 		 */
2861 		x += intel_fb->normal[i].x;
2862 		y += intel_fb->normal[i].y;
2863 
2864 		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2865 						      fb, i, fb->pitches[i],
2866 						      DRM_MODE_ROTATE_0, tile_size);
2867 		offset /= tile_size;
2868 
2869 		info->plane[i].offset = offset;
2870 		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2871 						     tile_width * cpp);
2872 		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2873 		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2874 
2875 		if (drm_rotation_90_or_270(rotation)) {
2876 			struct drm_rect r;
2877 
2878 			/* rotate the x/y offsets to match the GTT view */
2879 			r.x1 = x;
2880 			r.y1 = y;
2881 			r.x2 = x + width;
2882 			r.y2 = y + height;
2883 			drm_rect_rotate(&r,
2884 					info->plane[i].width * tile_width,
2885 					info->plane[i].height * tile_height,
2886 					DRM_MODE_ROTATE_270);
2887 			x = r.x1;
2888 			y = r.y1;
2889 
2890 			pitch_tiles = info->plane[i].height;
2891 			plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2892 
2893 			/* rotate the tile dimensions to match the GTT view */
2894 			swap(tile_width, tile_height);
2895 		} else {
2896 			pitch_tiles = info->plane[i].width;
2897 			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2898 		}
2899 
2900 		/*
2901 		 * We only keep the x/y offsets, so push all of the
2902 		 * gtt offset into the x/y offsets.
2903 		 */
2904 		intel_adjust_tile_offset(&x, &y,
2905 					 tile_width, tile_height,
2906 					 tile_size, pitch_tiles,
2907 					 gtt_offset * tile_size, 0);
2908 
2909 		gtt_offset += info->plane[i].width * info->plane[i].height;
2910 
2911 		plane_state->color_plane[i].offset = 0;
2912 		plane_state->color_plane[i].x = x;
2913 		plane_state->color_plane[i].y = y;
2914 	}
2915 }
2916 
2917 static int
2918 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2919 {
2920 	const struct intel_framebuffer *fb =
2921 		to_intel_framebuffer(plane_state->base.fb);
2922 	unsigned int rotation = plane_state->base.rotation;
2923 	int i, num_planes;
2924 
2925 	if (!fb)
2926 		return 0;
2927 
2928 	num_planes = fb->base.format->num_planes;
2929 
2930 	if (intel_plane_needs_remap(plane_state)) {
2931 		intel_plane_remap_gtt(plane_state);
2932 
2933 		/*
2934 		 * Sometimes even remapping can't overcome
2935 		 * the stride limitations :( Can happen with
2936 		 * big plane sizes and suitably misaligned
2937 		 * offsets.
2938 		 */
2939 		return intel_plane_check_stride(plane_state);
2940 	}
2941 
2942 	intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2943 
2944 	for (i = 0; i < num_planes; i++) {
2945 		plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2946 		plane_state->color_plane[i].offset = 0;
2947 
2948 		if (drm_rotation_90_or_270(rotation)) {
2949 			plane_state->color_plane[i].x = fb->rotated[i].x;
2950 			plane_state->color_plane[i].y = fb->rotated[i].y;
2951 		} else {
2952 			plane_state->color_plane[i].x = fb->normal[i].x;
2953 			plane_state->color_plane[i].y = fb->normal[i].y;
2954 		}
2955 	}
2956 
2957 	/* Rotate src coordinates to match rotated GTT view */
2958 	if (drm_rotation_90_or_270(rotation))
2959 		drm_rect_rotate(&plane_state->base.src,
2960 				fb->base.width << 16, fb->base.height << 16,
2961 				DRM_MODE_ROTATE_270);
2962 
2963 	return intel_plane_check_stride(plane_state);
2964 }
2965 
2966 static int i9xx_format_to_fourcc(int format)
2967 {
2968 	switch (format) {
2969 	case DISPPLANE_8BPP:
2970 		return DRM_FORMAT_C8;
2971 	case DISPPLANE_BGRX555:
2972 		return DRM_FORMAT_XRGB1555;
2973 	case DISPPLANE_BGRX565:
2974 		return DRM_FORMAT_RGB565;
2975 	default:
2976 	case DISPPLANE_BGRX888:
2977 		return DRM_FORMAT_XRGB8888;
2978 	case DISPPLANE_RGBX888:
2979 		return DRM_FORMAT_XBGR8888;
2980 	case DISPPLANE_BGRX101010:
2981 		return DRM_FORMAT_XRGB2101010;
2982 	case DISPPLANE_RGBX101010:
2983 		return DRM_FORMAT_XBGR2101010;
2984 	}
2985 }
2986 
2987 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2988 {
2989 	switch (format) {
2990 	case PLANE_CTL_FORMAT_RGB_565:
2991 		return DRM_FORMAT_RGB565;
2992 	case PLANE_CTL_FORMAT_NV12:
2993 		return DRM_FORMAT_NV12;
2994 	case PLANE_CTL_FORMAT_P010:
2995 		return DRM_FORMAT_P010;
2996 	case PLANE_CTL_FORMAT_P012:
2997 		return DRM_FORMAT_P012;
2998 	case PLANE_CTL_FORMAT_P016:
2999 		return DRM_FORMAT_P016;
3000 	case PLANE_CTL_FORMAT_Y210:
3001 		return DRM_FORMAT_Y210;
3002 	case PLANE_CTL_FORMAT_Y212:
3003 		return DRM_FORMAT_Y212;
3004 	case PLANE_CTL_FORMAT_Y216:
3005 		return DRM_FORMAT_Y216;
3006 	case PLANE_CTL_FORMAT_Y410:
3007 		return DRM_FORMAT_XVYU2101010;
3008 	case PLANE_CTL_FORMAT_Y412:
3009 		return DRM_FORMAT_XVYU12_16161616;
3010 	case PLANE_CTL_FORMAT_Y416:
3011 		return DRM_FORMAT_XVYU16161616;
3012 	default:
3013 	case PLANE_CTL_FORMAT_XRGB_8888:
3014 		if (rgb_order) {
3015 			if (alpha)
3016 				return DRM_FORMAT_ABGR8888;
3017 			else
3018 				return DRM_FORMAT_XBGR8888;
3019 		} else {
3020 			if (alpha)
3021 				return DRM_FORMAT_ARGB8888;
3022 			else
3023 				return DRM_FORMAT_XRGB8888;
3024 		}
3025 	case PLANE_CTL_FORMAT_XRGB_2101010:
3026 		if (rgb_order)
3027 			return DRM_FORMAT_XBGR2101010;
3028 		else
3029 			return DRM_FORMAT_XRGB2101010;
3030 	case PLANE_CTL_FORMAT_XRGB_16161616F:
3031 		if (rgb_order) {
3032 			if (alpha)
3033 				return DRM_FORMAT_ABGR16161616F;
3034 			else
3035 				return DRM_FORMAT_XBGR16161616F;
3036 		} else {
3037 			if (alpha)
3038 				return DRM_FORMAT_ARGB16161616F;
3039 			else
3040 				return DRM_FORMAT_XRGB16161616F;
3041 		}
3042 	}
3043 }
3044 
3045 static bool
3046 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3047 			      struct intel_initial_plane_config *plane_config)
3048 {
3049 	struct drm_device *dev = crtc->base.dev;
3050 	struct drm_i915_private *dev_priv = to_i915(dev);
3051 	struct drm_i915_gem_object *obj = NULL;
3052 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3053 	struct drm_framebuffer *fb = &plane_config->fb->base;
3054 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3055 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
3056 				    PAGE_SIZE);
3057 
3058 	size_aligned -= base_aligned;
3059 
3060 	if (plane_config->size == 0)
3061 		return false;
3062 
	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
3066 	if (size_aligned * 2 > dev_priv->stolen_usable_size)
3067 		return false;
3068 
3069 	switch (fb->modifier) {
3070 	case DRM_FORMAT_MOD_LINEAR:
3071 	case I915_FORMAT_MOD_X_TILED:
3072 	case I915_FORMAT_MOD_Y_TILED:
3073 		break;
3074 	default:
3075 		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3076 				 fb->modifier);
3077 		return false;
3078 	}
3079 
3080 	mutex_lock(&dev->struct_mutex);
3081 	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3082 							     base_aligned,
3083 							     base_aligned,
3084 							     size_aligned);
3085 	mutex_unlock(&dev->struct_mutex);
3086 	if (!obj)
3087 		return false;
3088 
3089 	switch (plane_config->tiling) {
3090 	case I915_TILING_NONE:
3091 		break;
3092 	case I915_TILING_X:
3093 	case I915_TILING_Y:
3094 		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3095 		break;
3096 	default:
3097 		MISSING_CASE(plane_config->tiling);
3098 		return false;
3099 	}
3100 
3101 	mode_cmd.pixel_format = fb->format->format;
3102 	mode_cmd.width = fb->width;
3103 	mode_cmd.height = fb->height;
3104 	mode_cmd.pitches[0] = fb->pitches[0];
3105 	mode_cmd.modifier[0] = fb->modifier;
3106 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3107 
3108 	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3109 		DRM_DEBUG_KMS("intel fb init failed\n");
3110 		goto out_unref_obj;
3111 	}
3112 
3113 
3114 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3115 	return true;
3116 
3117 out_unref_obj:
3118 	i915_gem_object_put(obj);
3119 	return false;
3120 }
3121 
3122 static void
3123 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3124 			struct intel_plane_state *plane_state,
3125 			bool visible)
3126 {
3127 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3128 
3129 	plane_state->base.visible = visible;
3130 
3131 	if (visible)
3132 		crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3133 	else
3134 		crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3135 }
3136 
3137 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3138 {
3139 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3140 	struct drm_plane *plane;
3141 
3142 	/*
3143 	 * Active_planes aliases if multiple "primary" or cursor planes
3144 	 * have been used on the same (or wrong) pipe. plane_mask uses
3145 	 * unique ids, hence we can use that to reconstruct active_planes.
3146 	 */
3147 	crtc_state->active_planes = 0;
3148 
3149 	drm_for_each_plane_mask(plane, &dev_priv->drm,
3150 				crtc_state->base.plane_mask)
3151 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3152 }
3153 
3154 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3155 					 struct intel_plane *plane)
3156 {
3157 	struct intel_crtc_state *crtc_state =
3158 		to_intel_crtc_state(crtc->base.state);
3159 	struct intel_plane_state *plane_state =
3160 		to_intel_plane_state(plane->base.state);
3161 
3162 	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3163 		      plane->base.base.id, plane->base.name,
3164 		      crtc->base.base.id, crtc->base.name);
3165 
3166 	intel_set_plane_visible(crtc_state, plane_state, false);
3167 	fixup_active_planes(crtc_state);
3168 	crtc_state->data_rate[plane->id] = 0;
3169 
3170 	if (plane->id == PLANE_PRIMARY)
3171 		intel_pre_disable_primary_noatomic(&crtc->base);
3172 
3173 	intel_disable_plane(plane, crtc_state);
3174 }
3175 
3176 static void
3177 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3178 			     struct intel_initial_plane_config *plane_config)
3179 {
3180 	struct drm_device *dev = intel_crtc->base.dev;
3181 	struct drm_i915_private *dev_priv = to_i915(dev);
3182 	struct drm_crtc *c;
3183 	struct drm_i915_gem_object *obj;
3184 	struct drm_plane *primary = intel_crtc->base.primary;
3185 	struct drm_plane_state *plane_state = primary->state;
3186 	struct intel_plane *intel_plane = to_intel_plane(primary);
3187 	struct intel_plane_state *intel_state =
3188 		to_intel_plane_state(plane_state);
3189 	struct drm_framebuffer *fb;
3190 
3191 	if (!plane_config->fb)
3192 		return;
3193 
3194 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
3195 		fb = &plane_config->fb->base;
3196 		goto valid_fb;
3197 	}
3198 
3199 	kfree(plane_config->fb);
3200 
3201 	/*
3202 	 * Failed to alloc the obj, check to see if we should share
3203 	 * an fb with another CRTC instead
3204 	 */
3205 	for_each_crtc(dev, c) {
3206 		struct intel_plane_state *state;
3207 
3208 		if (c == &intel_crtc->base)
3209 			continue;
3210 
3211 		if (!to_intel_crtc(c)->active)
3212 			continue;
3213 
3214 		state = to_intel_plane_state(c->primary->state);
3215 		if (!state->vma)
3216 			continue;
3217 
3218 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
3219 			fb = state->base.fb;
3220 			drm_framebuffer_get(fb);
3221 			goto valid_fb;
3222 		}
3223 	}
3224 
3225 	/*
3226 	 * We've failed to reconstruct the BIOS FB.  Current display state
3227 	 * indicates that the primary plane is visible, but has a NULL FB,
3228 	 * which will lead to problems later if we don't fix it up.  The
3229 	 * simplest solution is to just disable the primary plane now and
3230 	 * pretend the BIOS never had it enabled.
3231 	 */
3232 	intel_plane_disable_noatomic(intel_crtc, intel_plane);
3233 
3234 	return;
3235 
3236 valid_fb:
3237 	intel_state->base.rotation = plane_config->rotation;
3238 	intel_fill_fb_ggtt_view(&intel_state->view, fb,
3239 				intel_state->base.rotation);
3240 	intel_state->color_plane[0].stride =
3241 		intel_fb_pitch(fb, 0, intel_state->base.rotation);
3242 
3243 	mutex_lock(&dev->struct_mutex);
3244 	intel_state->vma =
3245 		intel_pin_and_fence_fb_obj(fb,
3246 					   &intel_state->view,
3247 					   intel_plane_uses_fence(intel_state),
3248 					   &intel_state->flags);
3249 	mutex_unlock(&dev->struct_mutex);
3250 	if (IS_ERR(intel_state->vma)) {
3251 		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3252 			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
3253 
3254 		intel_state->vma = NULL;
3255 		drm_framebuffer_put(fb);
3256 		return;
3257 	}
3258 
3259 	obj = intel_fb_obj(fb);
3260 	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
3261 
3262 	plane_state->src_x = 0;
3263 	plane_state->src_y = 0;
3264 	plane_state->src_w = fb->width << 16;
3265 	plane_state->src_h = fb->height << 16;
3266 
3267 	plane_state->crtc_x = 0;
3268 	plane_state->crtc_y = 0;
3269 	plane_state->crtc_w = fb->width;
3270 	plane_state->crtc_h = fb->height;
3271 
3272 	intel_state->base.src = drm_plane_state_src(plane_state);
3273 	intel_state->base.dst = drm_plane_state_dest(plane_state);
3274 
3275 	if (i915_gem_object_is_tiled(obj))
3276 		dev_priv->preserve_bios_swizzle = true;
3277 
3278 	plane_state->fb = fb;
3279 	plane_state->crtc = &intel_crtc->base;
3280 
3281 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3282 		  &obj->frontbuffer_bits);
3283 }
3284 
3285 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3286 			       int color_plane,
3287 			       unsigned int rotation)
3288 {
3289 	int cpp = fb->format->cpp[color_plane];
3290 
3291 	switch (fb->modifier) {
3292 	case DRM_FORMAT_MOD_LINEAR:
3293 	case I915_FORMAT_MOD_X_TILED:
3294 		return 4096;
3295 	case I915_FORMAT_MOD_Y_TILED_CCS:
3296 	case I915_FORMAT_MOD_Yf_TILED_CCS:
3297 		/* FIXME AUX plane? */
3298 	case I915_FORMAT_MOD_Y_TILED:
3299 	case I915_FORMAT_MOD_Yf_TILED:
3300 		if (cpp == 8)
3301 			return 2048;
3302 		else
3303 			return 4096;
3304 	default:
3305 		MISSING_CASE(fb->modifier);
3306 		return 2048;
3307 	}
3308 }
3309 
3310 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3311 			       int color_plane,
3312 			       unsigned int rotation)
3313 {
3314 	int cpp = fb->format->cpp[color_plane];
3315 
3316 	switch (fb->modifier) {
3317 	case DRM_FORMAT_MOD_LINEAR:
3318 	case I915_FORMAT_MOD_X_TILED:
3319 		if (cpp == 8)
3320 			return 4096;
3321 		else
3322 			return 5120;
3323 	case I915_FORMAT_MOD_Y_TILED_CCS:
3324 	case I915_FORMAT_MOD_Yf_TILED_CCS:
3325 		/* FIXME AUX plane? */
3326 	case I915_FORMAT_MOD_Y_TILED:
3327 	case I915_FORMAT_MOD_Yf_TILED:
3328 		if (cpp == 8)
3329 			return 2048;
3330 		else
3331 			return 5120;
3332 	default:
3333 		MISSING_CASE(fb->modifier);
3334 		return 2048;
3335 	}
3336 }
3337 
3338 static int icl_max_plane_width(const struct drm_framebuffer *fb,
3339 			       int color_plane,
3340 			       unsigned int rotation)
3341 {
3342 	return 5120;
3343 }
3344 
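/*
 * The CCS AUX surface has no x/y offset registers of its own, so its
 * intra-tile x/y offsets must end up matching the main surface. Walk the
 * AUX offset backwards one alignment step at a time, folding each step
 * into the x/y offsets, until they line up (or we run out of offset).
 */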
3345 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3346 					   int main_x, int main_y, u32 main_offset)
3347 {
3348 	const struct drm_framebuffer *fb = plane_state->base.fb;
3349 	int hsub = fb->format->hsub;
3350 	int vsub = fb->format->vsub;
3351 	int aux_x = plane_state->color_plane[1].x;
3352 	int aux_y = plane_state->color_plane[1].y;
3353 	u32 aux_offset = plane_state->color_plane[1].offset;
3354 	u32 alignment = intel_surf_alignment(fb, 1);
3355 
3356 	while (aux_offset >= main_offset && aux_y <= main_y) {
3357 		int x, y;
3358 
3359 		if (aux_x == main_x && aux_y == main_y)
3360 			break;
3361 
3362 		if (aux_offset == 0)
3363 			break;
3364 
3365 		x = aux_x / hsub;
3366 		y = aux_y / vsub;
3367 		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3368 							       aux_offset, aux_offset - alignment);
3369 		aux_x = x * hsub + aux_x % hsub;
3370 		aux_y = y * vsub + aux_y % vsub;
3371 	}
3372 
3373 	if (aux_x != main_x || aux_y != main_y)
3374 		return false;
3375 
3376 	plane_state->color_plane[1].offset = aux_offset;
3377 	plane_state->color_plane[1].x = aux_x;
3378 	plane_state->color_plane[1].y = aux_y;
3379 
3380 	return true;
3381 }
3382 
3383 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3384 {
3385 	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
3386 	const struct drm_framebuffer *fb = plane_state->base.fb;
3387 	unsigned int rotation = plane_state->base.rotation;
3388 	int x = plane_state->base.src.x1 >> 16;
3389 	int y = plane_state->base.src.y1 >> 16;
3390 	int w = drm_rect_width(&plane_state->base.src) >> 16;
3391 	int h = drm_rect_height(&plane_state->base.src) >> 16;
3392 	int max_width;
3393 	int max_height = 4096;
3394 	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
3395 
3396 	if (INTEL_GEN(dev_priv) >= 11)
3397 		max_width = icl_max_plane_width(fb, 0, rotation);
3398 	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3399 		max_width = glk_max_plane_width(fb, 0, rotation);
3400 	else
3401 		max_width = skl_max_plane_width(fb, 0, rotation);
3402 
3403 	if (w > max_width || h > max_height) {
3404 		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3405 			      w, h, max_width, max_height);
3406 		return -EINVAL;
3407 	}
3408 
3409 	intel_add_fb_offsets(&x, &y, plane_state, 0);
3410 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3411 	alignment = intel_surf_alignment(fb, 0);
3412 
3413 	/*
3414 	 * AUX surface offset is specified as the distance from the
3415 	 * main surface offset, and it must be non-negative. Make
3416 	 * sure that is what we will get.
3417 	 */
3418 	if (offset > aux_offset)
3419 		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3420 							   offset, aux_offset & ~(alignment - 1));
3421 
3422 	/*
3423 	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceeds the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested.
3427 	 */
3428 	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3429 		int cpp = fb->format->cpp[0];
3430 
3431 		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3432 			if (offset == 0) {
3433 				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
3434 				return -EINVAL;
3435 			}
3436 
3437 			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3438 								   offset, offset - alignment);
3439 		}
3440 	}
3441 
3442 	/*
3443 	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3444 	 * they match with the main surface x/y offsets.
3445 	 */
3446 	if (is_ccs_modifier(fb->modifier)) {
3447 		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3448 			if (offset == 0)
3449 				break;
3450 
3451 			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3452 								   offset, offset - alignment);
3453 		}
3454 
3455 		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
3456 			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3457 			return -EINVAL;
3458 		}
3459 	}
3460 
3461 	plane_state->color_plane[0].offset = offset;
3462 	plane_state->color_plane[0].x = x;
3463 	plane_state->color_plane[0].y = y;
3464 
3465 	/*
3466 	 * Put the final coordinates back so that the src
3467 	 * coordinate checks will see the right values.
3468 	 */
3469 	drm_rect_translate(&plane_state->base.src,
3470 			   (x << 16) - plane_state->base.src.x1,
3471 			   (y << 16) - plane_state->base.src.y1);
3472 
3473 	return 0;
3474 }
3475 
3476 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3477 {
3478 	const struct drm_framebuffer *fb = plane_state->base.fb;
3479 	unsigned int rotation = plane_state->base.rotation;
3480 	int max_width = skl_max_plane_width(fb, 1, rotation);
3481 	int max_height = 4096;
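	/*
	 * The src rectangle is in 16.16 fixed point; the extra bit of
	 * shift also halves the coordinates for the 2x2 subsampled
	 * CbCr plane.
	 */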
3482 	int x = plane_state->base.src.x1 >> 17;
3483 	int y = plane_state->base.src.y1 >> 17;
3484 	int w = drm_rect_width(&plane_state->base.src) >> 17;
3485 	int h = drm_rect_height(&plane_state->base.src) >> 17;
3486 	u32 offset;
3487 
3488 	intel_add_fb_offsets(&x, &y, plane_state, 1);
3489 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3490 
3491 	/* FIXME not quite sure how/if these apply to the chroma plane */
3492 	if (w > max_width || h > max_height) {
3493 		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3494 			      w, h, max_width, max_height);
3495 		return -EINVAL;
3496 	}
3497 
3498 	plane_state->color_plane[1].offset = offset;
3499 	plane_state->color_plane[1].x = x;
3500 	plane_state->color_plane[1].y = y;
3501 
3502 	return 0;
3503 }
3504 
3505 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3506 {
3507 	const struct drm_framebuffer *fb = plane_state->base.fb;
3508 	int src_x = plane_state->base.src.x1 >> 16;
3509 	int src_y = plane_state->base.src.y1 >> 16;
3510 	int hsub = fb->format->hsub;
3511 	int vsub = fb->format->vsub;
3512 	int x = src_x / hsub;
3513 	int y = src_y / vsub;
3514 	u32 offset;
3515 
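	/*
	 * x/y above are the main surface coordinates scaled down to
	 * AUX surface units; the subsampling remainder is added back
	 * below so that the final x/y match the main surface exactly.
	 */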
3516 	intel_add_fb_offsets(&x, &y, plane_state, 1);
3517 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3518 
3519 	plane_state->color_plane[1].offset = offset;
3520 	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3521 	plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3522 
3523 	return 0;
3524 }
3525 
3526 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3527 {
3528 	const struct drm_framebuffer *fb = plane_state->base.fb;
3529 	int ret;
3530 
3531 	ret = intel_plane_compute_gtt(plane_state);
3532 	if (ret)
3533 		return ret;
3534 
3535 	if (!plane_state->base.visible)
3536 		return 0;
3537 
3538 	/*
3539 	 * Handle the AUX surface first since
3540 	 * the main surface setup depends on it.
3541 	 */
3542 	if (is_planar_yuv_format(fb->format->format)) {
3543 		ret = skl_check_nv12_aux_surface(plane_state);
3544 		if (ret)
3545 			return ret;
3546 	} else if (is_ccs_modifier(fb->modifier)) {
3547 		ret = skl_check_ccs_aux_surface(plane_state);
3548 		if (ret)
3549 			return ret;
3550 	} else {
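		/*
		 * Without an AUX plane, use a large 4k aligned offset
		 * so that skl_check_main_surface() never clamps the
		 * main surface offset against it.
		 */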
3551 		plane_state->color_plane[1].offset = ~0xfff;
3552 		plane_state->color_plane[1].x = 0;
3553 		plane_state->color_plane[1].y = 0;
3554 	}
3555 
3556 	ret = skl_check_main_surface(plane_state);
3557 	if (ret)
3558 		return ret;
3559 
3560 	return 0;
3561 }
3562 
3563 unsigned int
3564 i9xx_plane_max_stride(struct intel_plane *plane,
3565 		      u32 pixel_format, u64 modifier,
3566 		      unsigned int rotation)
3567 {
3568 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3569 
3570 	if (!HAS_GMCH(dev_priv)) {
3571 		return 32*1024;
3572 	} else if (INTEL_GEN(dev_priv) >= 4) {
3573 		if (modifier == I915_FORMAT_MOD_X_TILED)
3574 			return 16*1024;
3575 		else
3576 			return 32*1024;
3577 	} else if (INTEL_GEN(dev_priv) >= 3) {
3578 		if (modifier == I915_FORMAT_MOD_X_TILED)
3579 			return 8*1024;
3580 		else
3581 			return 16*1024;
3582 	} else {
3583 		if (plane->i9xx_plane == PLANE_C)
3584 			return 4*1024;
3585 		else
3586 			return 8*1024;
3587 	}
3588 }
3589 
3590 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3591 {
3592 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3593 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3594 	u32 dspcntr = 0;
3595 
3596 	if (crtc_state->gamma_enable)
3597 		dspcntr |= DISPPLANE_GAMMA_ENABLE;
3598 
3599 	if (crtc_state->csc_enable)
3600 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3601 
3602 	if (INTEL_GEN(dev_priv) < 5)
3603 		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3604 
3605 	return dspcntr;
3606 }
3607 
3608 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3609 			  const struct intel_plane_state *plane_state)
3610 {
3611 	struct drm_i915_private *dev_priv =
3612 		to_i915(plane_state->base.plane->dev);
3613 	const struct drm_framebuffer *fb = plane_state->base.fb;
3614 	unsigned int rotation = plane_state->base.rotation;
3615 	u32 dspcntr;
3616 
3617 	dspcntr = DISPLAY_PLANE_ENABLE;
3618 
3619 	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3620 	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
3621 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3622 
3623 	switch (fb->format->format) {
3624 	case DRM_FORMAT_C8:
3625 		dspcntr |= DISPPLANE_8BPP;
3626 		break;
3627 	case DRM_FORMAT_XRGB1555:
3628 		dspcntr |= DISPPLANE_BGRX555;
3629 		break;
3630 	case DRM_FORMAT_RGB565:
3631 		dspcntr |= DISPPLANE_BGRX565;
3632 		break;
3633 	case DRM_FORMAT_XRGB8888:
3634 		dspcntr |= DISPPLANE_BGRX888;
3635 		break;
3636 	case DRM_FORMAT_XBGR8888:
3637 		dspcntr |= DISPPLANE_RGBX888;
3638 		break;
3639 	case DRM_FORMAT_XRGB2101010:
3640 		dspcntr |= DISPPLANE_BGRX101010;
3641 		break;
3642 	case DRM_FORMAT_XBGR2101010:
3643 		dspcntr |= DISPPLANE_RGBX101010;
3644 		break;
3645 	default:
3646 		MISSING_CASE(fb->format->format);
3647 		return 0;
3648 	}
3649 
3650 	if (INTEL_GEN(dev_priv) >= 4 &&
3651 	    fb->modifier == I915_FORMAT_MOD_X_TILED)
3652 		dspcntr |= DISPPLANE_TILED;
3653 
3654 	if (rotation & DRM_MODE_ROTATE_180)
3655 		dspcntr |= DISPPLANE_ROTATE_180;
3656 
3657 	if (rotation & DRM_MODE_REFLECT_X)
3658 		dspcntr |= DISPPLANE_MIRROR;
3659 
3660 	return dspcntr;
3661 }
3662 
3663 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3664 {
3665 	struct drm_i915_private *dev_priv =
3666 		to_i915(plane_state->base.plane->dev);
3667 	int src_x, src_y;
3668 	u32 offset;
3669 	int ret;
3670 
3671 	ret = intel_plane_compute_gtt(plane_state);
3672 	if (ret)
3673 		return ret;
3674 
3675 	if (!plane_state->base.visible)
3676 		return 0;
3677 
3678 	src_x = plane_state->base.src.x1 >> 16;
3679 	src_y = plane_state->base.src.y1 >> 16;
3680 
3681 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3682 
3683 	if (INTEL_GEN(dev_priv) >= 4)
3684 		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3685 							    plane_state, 0);
3686 	else
3687 		offset = 0;
3688 
3689 	/*
3690 	 * Put the final coordinates back so that the src
3691 	 * coordinate checks will see the right values.
3692 	 */
3693 	drm_rect_translate(&plane_state->base.src,
3694 			   (src_x << 16) - plane_state->base.src.x1,
3695 			   (src_y << 16) - plane_state->base.src.y1);
3696 
3697 	/* HSW/BDW do this automagically in hardware */
3698 	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3699 		unsigned int rotation = plane_state->base.rotation;
3700 		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3701 		int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3702 
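		/*
		 * With 180 degree rotation (or X mirroring) the hardware
		 * scans out backwards along the affected axis, so point
		 * the base at the last pixel of the source window.
		 */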
3703 		if (rotation & DRM_MODE_ROTATE_180) {
3704 			src_x += src_w - 1;
3705 			src_y += src_h - 1;
3706 		} else if (rotation & DRM_MODE_REFLECT_X) {
3707 			src_x += src_w - 1;
3708 		}
3709 	}
3710 
3711 	plane_state->color_plane[0].offset = offset;
3712 	plane_state->color_plane[0].x = src_x;
3713 	plane_state->color_plane[0].y = src_y;
3714 
3715 	return 0;
3716 }
3717 
3718 static int
3719 i9xx_plane_check(struct intel_crtc_state *crtc_state,
3720 		 struct intel_plane_state *plane_state)
3721 {
3722 	int ret;
3723 
3724 	ret = chv_plane_check_rotation(plane_state);
3725 	if (ret)
3726 		return ret;
3727 
3728 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3729 						  &crtc_state->base,
3730 						  DRM_PLANE_HELPER_NO_SCALING,
3731 						  DRM_PLANE_HELPER_NO_SCALING,
3732 						  false, true);
3733 	if (ret)
3734 		return ret;
3735 
3736 	ret = i9xx_check_plane_surface(plane_state);
3737 	if (ret)
3738 		return ret;
3739 
3740 	if (!plane_state->base.visible)
3741 		return 0;
3742 
3743 	ret = intel_plane_check_src_coordinates(plane_state);
3744 	if (ret)
3745 		return ret;
3746 
3747 	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3748 
3749 	return 0;
3750 }
3751 
3752 static void i9xx_update_plane(struct intel_plane *plane,
3753 			      const struct intel_crtc_state *crtc_state,
3754 			      const struct intel_plane_state *plane_state)
3755 {
3756 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3757 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3758 	u32 linear_offset;
3759 	int x = plane_state->color_plane[0].x;
3760 	int y = plane_state->color_plane[0].y;
3761 	unsigned long irqflags;
3762 	u32 dspaddr_offset;
3763 	u32 dspcntr;
3764 
3765 	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
3766 
3767 	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
3768 
3769 	if (INTEL_GEN(dev_priv) >= 4)
3770 		dspaddr_offset = plane_state->color_plane[0].offset;
3771 	else
3772 		dspaddr_offset = linear_offset;
3773 
3774 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3775 
3776 	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3777 
3778 	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * pipesrc and dspsize control the size that is scaled
		 * from, which should always be the user's requested size.
		 */
3782 		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
3783 		I915_WRITE_FW(DSPSIZE(i9xx_plane),
3784 			      ((crtc_state->pipe_src_h - 1) << 16) |
3785 			      (crtc_state->pipe_src_w - 1));
3786 	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
3787 		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
3788 		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
3789 			      ((crtc_state->pipe_src_h - 1) << 16) |
3790 			      (crtc_state->pipe_src_w - 1));
3791 		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
3792 	}
3793 
3794 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3795 		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3796 	} else if (INTEL_GEN(dev_priv) >= 4) {
3797 		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3798 		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3799 	}
3800 
3801 	/*
3802 	 * The control register self-arms if the plane was previously
3803 	 * disabled. Try to make the plane enable atomic by writing
3804 	 * the control register just before the surface register.
3805 	 */
3806 	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3807 	if (INTEL_GEN(dev_priv) >= 4)
3808 		I915_WRITE_FW(DSPSURF(i9xx_plane),
3809 			      intel_plane_ggtt_offset(plane_state) +
3810 			      dspaddr_offset);
3811 	else
3812 		I915_WRITE_FW(DSPADDR(i9xx_plane),
3813 			      intel_plane_ggtt_offset(plane_state) +
3814 			      dspaddr_offset);
3815 
3816 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3817 }
3818 
3819 static void i9xx_disable_plane(struct intel_plane *plane,
3820 			       const struct intel_crtc_state *crtc_state)
3821 {
3822 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3823 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3824 	unsigned long irqflags;
3825 	u32 dspcntr;
3826 
3827 	/*
3828 	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3829 	 * enable on ilk+ affect the pipe bottom color as
3830 	 * well, so we must configure them even if the plane
3831 	 * is disabled.
3832 	 *
3833 	 * On pre-g4x there is no way to gamma correct the
3834 	 * pipe bottom color but we'll keep on doing this
3835 	 * anyway so that the crtc state readout works correctly.
3836 	 */
3837 	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
3838 
3839 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3840 
3841 	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3842 	if (INTEL_GEN(dev_priv) >= 4)
3843 		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
3844 	else
3845 		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
3846 
3847 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
3848 }
3849 
3850 static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3851 				    enum pipe *pipe)
3852 {
3853 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3854 	enum intel_display_power_domain power_domain;
3855 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3856 	intel_wakeref_t wakeref;
3857 	bool ret;
3858 	u32 val;
3859 
3860 	/*
3861 	 * Not 100% correct for planes that can move between pipes,
3862 	 * but that's only the case for gen2-4 which don't have any
3863 	 * display power wells.
3864 	 */
3865 	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
3866 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3867 	if (!wakeref)
3868 		return false;
3869 
3870 	val = I915_READ(DSPCNTR(i9xx_plane));
3871 
3872 	ret = val & DISPLAY_PLANE_ENABLE;
3873 
3874 	if (INTEL_GEN(dev_priv) >= 5)
3875 		*pipe = plane->pipe;
3876 	else
3877 		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3878 			DISPPLANE_SEL_PIPE_SHIFT;
3879 
3880 	intel_display_power_put(dev_priv, power_domain, wakeref);
3881 
3882 	return ret;
3883 }
3884 
3885 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3886 {
3887 	struct drm_device *dev = intel_crtc->base.dev;
3888 	struct drm_i915_private *dev_priv = to_i915(dev);
3889 
3890 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3891 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3892 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3893 }
3894 
3895 /*
 * This function detaches (i.e. unbinds) any unused scalers in hardware.
3897  */
3898 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3899 {
3900 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3901 	const struct intel_crtc_scaler_state *scaler_state =
3902 		&crtc_state->scaler_state;
3903 	int i;
3904 
3905 	/* loop through and disable scalers that aren't in use */
3906 	for (i = 0; i < intel_crtc->num_scalers; i++) {
3907 		if (!scaler_state->scalers[i].in_use)
3908 			skl_detach_scaler(intel_crtc, i);
3909 	}
3910 }
3911 
3912 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3913 					  int color_plane, unsigned int rotation)
3914 {
3915 	/*
	 * The stride is expressed either in chunks of 64 bytes for linear
	 * buffers, or in number of tiles for tiled buffers.
3918 	 */
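	/*
	 * E.g. a linear fb with a 4096 byte stride is programmed as
	 * 4096 / 64 = 64, while an X-tiled fb (512 byte wide tiles)
	 * with the same byte stride would be programmed as 8 tiles.
	 */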
3919 	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3920 		return 64;
3921 	else if (drm_rotation_90_or_270(rotation))
3922 		return intel_tile_height(fb, color_plane);
3923 	else
3924 		return intel_tile_width_bytes(fb, color_plane);
3925 }
3926 
3927 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3928 		     int color_plane)
3929 {
3930 	const struct drm_framebuffer *fb = plane_state->base.fb;
3931 	unsigned int rotation = plane_state->base.rotation;
3932 	u32 stride = plane_state->color_plane[color_plane].stride;
3933 
3934 	if (color_plane >= fb->format->num_planes)
3935 		return 0;
3936 
3937 	return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3938 }
3939 
3940 static u32 skl_plane_ctl_format(u32 pixel_format)
3941 {
3942 	switch (pixel_format) {
3943 	case DRM_FORMAT_C8:
3944 		return PLANE_CTL_FORMAT_INDEXED;
3945 	case DRM_FORMAT_RGB565:
3946 		return PLANE_CTL_FORMAT_RGB_565;
3947 	case DRM_FORMAT_XBGR8888:
3948 	case DRM_FORMAT_ABGR8888:
3949 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3950 	case DRM_FORMAT_XRGB8888:
3951 	case DRM_FORMAT_ARGB8888:
3952 		return PLANE_CTL_FORMAT_XRGB_8888;
3953 	case DRM_FORMAT_XRGB2101010:
3954 		return PLANE_CTL_FORMAT_XRGB_2101010;
3955 	case DRM_FORMAT_XBGR2101010:
3956 		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3957 	case DRM_FORMAT_XBGR16161616F:
3958 	case DRM_FORMAT_ABGR16161616F:
3959 		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3960 	case DRM_FORMAT_XRGB16161616F:
3961 	case DRM_FORMAT_ARGB16161616F:
3962 		return PLANE_CTL_FORMAT_XRGB_16161616F;
3963 	case DRM_FORMAT_YUYV:
3964 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3965 	case DRM_FORMAT_YVYU:
3966 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3967 	case DRM_FORMAT_UYVY:
3968 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3969 	case DRM_FORMAT_VYUY:
3970 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3971 	case DRM_FORMAT_NV12:
3972 		return PLANE_CTL_FORMAT_NV12;
3973 	case DRM_FORMAT_P010:
3974 		return PLANE_CTL_FORMAT_P010;
3975 	case DRM_FORMAT_P012:
3976 		return PLANE_CTL_FORMAT_P012;
3977 	case DRM_FORMAT_P016:
3978 		return PLANE_CTL_FORMAT_P016;
3979 	case DRM_FORMAT_Y210:
3980 		return PLANE_CTL_FORMAT_Y210;
3981 	case DRM_FORMAT_Y212:
3982 		return PLANE_CTL_FORMAT_Y212;
3983 	case DRM_FORMAT_Y216:
3984 		return PLANE_CTL_FORMAT_Y216;
3985 	case DRM_FORMAT_XVYU2101010:
3986 		return PLANE_CTL_FORMAT_Y410;
3987 	case DRM_FORMAT_XVYU12_16161616:
3988 		return PLANE_CTL_FORMAT_Y412;
3989 	case DRM_FORMAT_XVYU16161616:
3990 		return PLANE_CTL_FORMAT_Y416;
3991 	default:
3992 		MISSING_CASE(pixel_format);
3993 	}
3994 
3995 	return 0;
3996 }
3997 
3998 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
3999 {
4000 	if (!plane_state->base.fb->format->has_alpha)
4001 		return PLANE_CTL_ALPHA_DISABLE;
4002 
4003 	switch (plane_state->base.pixel_blend_mode) {
4004 	case DRM_MODE_BLEND_PIXEL_NONE:
4005 		return PLANE_CTL_ALPHA_DISABLE;
4006 	case DRM_MODE_BLEND_PREMULTI:
4007 		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4008 	case DRM_MODE_BLEND_COVERAGE:
4009 		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4010 	default:
4011 		MISSING_CASE(plane_state->base.pixel_blend_mode);
4012 		return PLANE_CTL_ALPHA_DISABLE;
4013 	}
4014 }
4015 
4016 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4017 {
4018 	if (!plane_state->base.fb->format->has_alpha)
4019 		return PLANE_COLOR_ALPHA_DISABLE;
4020 
4021 	switch (plane_state->base.pixel_blend_mode) {
4022 	case DRM_MODE_BLEND_PIXEL_NONE:
4023 		return PLANE_COLOR_ALPHA_DISABLE;
4024 	case DRM_MODE_BLEND_PREMULTI:
4025 		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4026 	case DRM_MODE_BLEND_COVERAGE:
4027 		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4028 	default:
4029 		MISSING_CASE(plane_state->base.pixel_blend_mode);
4030 		return PLANE_COLOR_ALPHA_DISABLE;
4031 	}
4032 }
4033 
4034 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4035 {
4036 	switch (fb_modifier) {
4037 	case DRM_FORMAT_MOD_LINEAR:
4038 		break;
4039 	case I915_FORMAT_MOD_X_TILED:
4040 		return PLANE_CTL_TILED_X;
4041 	case I915_FORMAT_MOD_Y_TILED:
4042 		return PLANE_CTL_TILED_Y;
4043 	case I915_FORMAT_MOD_Y_TILED_CCS:
4044 		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4045 	case I915_FORMAT_MOD_Yf_TILED:
4046 		return PLANE_CTL_TILED_YF;
4047 	case I915_FORMAT_MOD_Yf_TILED_CCS:
4048 		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4049 	default:
4050 		MISSING_CASE(fb_modifier);
4051 	}
4052 
4053 	return 0;
4054 }
4055 
4056 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4057 {
4058 	switch (rotate) {
4059 	case DRM_MODE_ROTATE_0:
4060 		break;
4061 	/*
	 * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with
	 * Xrandr, while i915 HW rotation is clockwise; that's why the 90
	 * and 270 degree values are swapped here.
4064 	 */
4065 	case DRM_MODE_ROTATE_90:
4066 		return PLANE_CTL_ROTATE_270;
4067 	case DRM_MODE_ROTATE_180:
4068 		return PLANE_CTL_ROTATE_180;
4069 	case DRM_MODE_ROTATE_270:
4070 		return PLANE_CTL_ROTATE_90;
4071 	default:
4072 		MISSING_CASE(rotate);
4073 	}
4074 
4075 	return 0;
4076 }
4077 
4078 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4079 {
4080 	switch (reflect) {
4081 	case 0:
4082 		break;
4083 	case DRM_MODE_REFLECT_X:
4084 		return PLANE_CTL_FLIP_HORIZONTAL;
4085 	case DRM_MODE_REFLECT_Y:
4086 	default:
4087 		MISSING_CASE(reflect);
4088 	}
4089 
4090 	return 0;
4091 }
4092 
4093 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4094 {
4095 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4096 	u32 plane_ctl = 0;
4097 
4098 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4099 		return plane_ctl;
4100 
4101 	if (crtc_state->gamma_enable)
4102 		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4103 
4104 	if (crtc_state->csc_enable)
4105 		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4106 
4107 	return plane_ctl;
4108 }
4109 
4110 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4111 		  const struct intel_plane_state *plane_state)
4112 {
4113 	struct drm_i915_private *dev_priv =
4114 		to_i915(plane_state->base.plane->dev);
4115 	const struct drm_framebuffer *fb = plane_state->base.fb;
4116 	unsigned int rotation = plane_state->base.rotation;
4117 	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4118 	u32 plane_ctl;
4119 
4120 	plane_ctl = PLANE_CTL_ENABLE;
4121 
4122 	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4123 		plane_ctl |= skl_plane_ctl_alpha(plane_state);
4124 		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4125 
4126 		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4127 			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4128 
4129 		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4130 			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4131 	}
4132 
4133 	plane_ctl |= skl_plane_ctl_format(fb->format->format);
4134 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4135 	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4136 
4137 	if (INTEL_GEN(dev_priv) >= 10)
4138 		plane_ctl |= cnl_plane_ctl_flip(rotation &
4139 						DRM_MODE_REFLECT_MASK);
4140 
4141 	if (key->flags & I915_SET_COLORKEY_DESTINATION)
4142 		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4143 	else if (key->flags & I915_SET_COLORKEY_SOURCE)
4144 		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4145 
4146 	return plane_ctl;
4147 }
4148 
4149 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4150 {
4151 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4152 	u32 plane_color_ctl = 0;
4153 
4154 	if (INTEL_GEN(dev_priv) >= 11)
4155 		return plane_color_ctl;
4156 
4157 	if (crtc_state->gamma_enable)
4158 		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4159 
4160 	if (crtc_state->csc_enable)
4161 		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4162 
4163 	return plane_color_ctl;
4164 }
4165 
4166 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4167 			const struct intel_plane_state *plane_state)
4168 {
4169 	struct drm_i915_private *dev_priv =
4170 		to_i915(plane_state->base.plane->dev);
4171 	const struct drm_framebuffer *fb = plane_state->base.fb;
4172 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4173 	u32 plane_color_ctl = 0;
4174 
4175 	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4176 	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4177 
4178 	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4179 		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4180 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4181 		else
4182 			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4183 
4184 		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4185 			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4186 	} else if (fb->format->is_yuv) {
4187 		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4188 	}
4189 
4190 	return plane_color_ctl;
4191 }
4192 
4193 static int
4194 __intel_display_resume(struct drm_device *dev,
4195 		       struct drm_atomic_state *state,
4196 		       struct drm_modeset_acquire_ctx *ctx)
4197 {
4198 	struct drm_crtc_state *crtc_state;
4199 	struct drm_crtc *crtc;
4200 	int i, ret;
4201 
4202 	intel_modeset_setup_hw_state(dev, ctx);
4203 	i915_redisable_vga(to_i915(dev));
4204 
4205 	if (!state)
4206 		return 0;
4207 
4208 	/*
	 * We've duplicated the state, so pointers into the old state are
	 * invalid.
	 *
	 * Don't attempt to use the old state until the duplicated state
	 * has been committed.
4212 	 */
4213 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
4214 		/*
4215 		 * Force recalculation even if we restore
4216 		 * current state. With fast modeset this may not result
4217 		 * in a modeset when the state is compatible.
4218 		 */
4219 		crtc_state->mode_changed = true;
4220 	}
4221 
4222 	/* ignore any reset values/BIOS leftovers in the WM registers */
4223 	if (!HAS_GMCH(to_i915(dev)))
4224 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
4225 
4226 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4227 
4228 	WARN_ON(ret == -EDEADLK);
4229 	return ret;
4230 }
4231 
4232 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4233 {
4234 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4235 		intel_has_gpu_reset(dev_priv));
4236 }
4237 
4238 void intel_prepare_reset(struct drm_i915_private *dev_priv)
4239 {
4240 	struct drm_device *dev = &dev_priv->drm;
4241 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4242 	struct drm_atomic_state *state;
4243 	int ret;
4244 
4245 	/* reset doesn't touch the display */
4246 	if (!i915_modparams.force_reset_modeset_test &&
4247 	    !gpu_reset_clobbers_display(dev_priv))
4248 		return;
4249 
4250 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
4251 	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4252 	wake_up_all(&dev_priv->gpu_error.wait_queue);
4253 
4254 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4255 		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4256 		i915_gem_set_wedged(dev_priv);
4257 	}
4258 
4259 	/*
4260 	 * Need mode_config.mutex so that we don't
4261 	 * trample ongoing ->detect() and whatnot.
4262 	 */
4263 	mutex_lock(&dev->mode_config.mutex);
4264 	drm_modeset_acquire_init(ctx, 0);
4265 	while (1) {
4266 		ret = drm_modeset_lock_all_ctx(dev, ctx);
4267 		if (ret != -EDEADLK)
4268 			break;
4269 
4270 		drm_modeset_backoff(ctx);
4271 	}
4272 	/*
4273 	 * Disabling the crtcs gracefully seems nicer. Also the
4274 	 * g33 docs say we should at least disable all the planes.
4275 	 */
4276 	state = drm_atomic_helper_duplicate_state(dev, ctx);
4277 	if (IS_ERR(state)) {
4278 		ret = PTR_ERR(state);
4279 		DRM_ERROR("Duplicating state failed with %i\n", ret);
4280 		return;
4281 	}
4282 
4283 	ret = drm_atomic_helper_disable_all(dev, ctx);
4284 	if (ret) {
		DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
4286 		drm_atomic_state_put(state);
4287 		return;
4288 	}
4289 
4290 	dev_priv->modeset_restore_state = state;
4291 	state->acquire_ctx = ctx;
4292 }
4293 
4294 void intel_finish_reset(struct drm_i915_private *dev_priv)
4295 {
4296 	struct drm_device *dev = &dev_priv->drm;
4297 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4298 	struct drm_atomic_state *state;
4299 	int ret;
4300 
4301 	/* reset doesn't touch the display */
4302 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
4303 		return;
4304 
4305 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
4306 	if (!state)
4307 		goto unlock;
4308 
4309 	/* reset doesn't touch the display */
4310 	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* hit only via force_reset_modeset_test; just restore the display */
4312 		ret = __intel_display_resume(dev, state, ctx);
4313 		if (ret)
4314 			DRM_ERROR("Restoring old state failed with %i\n", ret);
4315 	} else {
4316 		/*
4317 		 * The display has been reset as well,
4318 		 * so need a full re-initialization.
4319 		 */
4320 		intel_pps_unlock_regs_wa(dev_priv);
4321 		intel_modeset_init_hw(dev);
4322 		intel_init_clock_gating(dev_priv);
4323 
4324 		spin_lock_irq(&dev_priv->irq_lock);
4325 		if (dev_priv->display.hpd_irq_setup)
4326 			dev_priv->display.hpd_irq_setup(dev_priv);
4327 		spin_unlock_irq(&dev_priv->irq_lock);
4328 
4329 		ret = __intel_display_resume(dev, state, ctx);
4330 		if (ret)
4331 			DRM_ERROR("Restoring old state failed with %i\n", ret);
4332 
4333 		intel_hpd_init(dev_priv);
4334 	}
4335 
4336 	drm_atomic_state_put(state);
4337 unlock:
4338 	drm_modeset_drop_locks(ctx);
4339 	drm_modeset_acquire_fini(ctx);
4340 	mutex_unlock(&dev->mode_config.mutex);
4341 
4342 	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4343 }
4344 
4345 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4346 {
4347 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4348 	enum pipe pipe = crtc->pipe;
4349 	u32 tmp;
4350 
4351 	tmp = I915_READ(PIPE_CHICKEN(pipe));
4352 
4353 	/*
4354 	 * Display WA #1153: icl
4355 	 * enable hardware to bypass the alpha math
4356 	 * and rounding for per-pixel values 00 and 0xff
4357 	 */
4358 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4359 	/*
	 * Display WA #1605353570: icl
	 * Set the pixel rounding bit to 1 to allow framebuffer
	 * pixels to pass through the pipe unmodified.
4364 	 */
4365 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4366 	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4367 }
4368 
4369 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
4370 				     const struct intel_crtc_state *new_crtc_state)
4371 {
4372 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
4373 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4374 
4375 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
4376 	crtc->base.mode = new_crtc_state->base.mode;
4377 
4378 	/*
4379 	 * Update pipe size and adjust fitter if needed: the reason for this is
4380 	 * that in compute_mode_changes we check the native mode (not the pfit
4381 	 * mode) to see if we can flip rather than do a full mode set. In the
4382 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
4383 	 * pfit state, we'll end up with a big fb scanned out into the wrong
4384 	 * sized surface.
4385 	 */
4386 
4387 	I915_WRITE(PIPESRC(crtc->pipe),
4388 		   ((new_crtc_state->pipe_src_w - 1) << 16) |
4389 		   (new_crtc_state->pipe_src_h - 1));
4390 
4391 	/* on skylake this is done by detaching scalers */
4392 	if (INTEL_GEN(dev_priv) >= 9) {
4393 		skl_detach_scalers(new_crtc_state);
4394 
4395 		if (new_crtc_state->pch_pfit.enabled)
4396 			skylake_pfit_enable(new_crtc_state);
4397 	} else if (HAS_PCH_SPLIT(dev_priv)) {
4398 		if (new_crtc_state->pch_pfit.enabled)
4399 			ironlake_pfit_enable(new_crtc_state);
4400 		else if (old_crtc_state->pch_pfit.enabled)
4401 			ironlake_pfit_disable(old_crtc_state);
4402 	}
4403 
4404 	if (INTEL_GEN(dev_priv) >= 11)
4405 		icl_set_pipe_chicken(crtc);
4406 }
4407 
4408 static void intel_fdi_normal_train(struct intel_crtc *crtc)
4409 {
4410 	struct drm_device *dev = crtc->base.dev;
4411 	struct drm_i915_private *dev_priv = to_i915(dev);
4412 	int pipe = crtc->pipe;
4413 	i915_reg_t reg;
4414 	u32 temp;
4415 
4416 	/* enable normal train */
4417 	reg = FDI_TX_CTL(pipe);
4418 	temp = I915_READ(reg);
4419 	if (IS_IVYBRIDGE(dev_priv)) {
4420 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4421 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
4422 	} else {
4423 		temp &= ~FDI_LINK_TRAIN_NONE;
4424 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
4425 	}
4426 	I915_WRITE(reg, temp);
4427 
4428 	reg = FDI_RX_CTL(pipe);
4429 	temp = I915_READ(reg);
4430 	if (HAS_PCH_CPT(dev_priv)) {
4431 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4432 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4433 	} else {
4434 		temp &= ~FDI_LINK_TRAIN_NONE;
4435 		temp |= FDI_LINK_TRAIN_NONE;
4436 	}
4437 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4438 
4439 	/* wait one idle pattern time */
4440 	POSTING_READ(reg);
4441 	udelay(1000);
4442 
4443 	/* IVB wants error correction enabled */
4444 	if (IS_IVYBRIDGE(dev_priv))
4445 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4446 			   FDI_FE_ERRC_ENABLE);
4447 }
4448 
4449 /* The FDI link training functions for ILK/Ibexpeak. */
4450 static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4451 				    const struct intel_crtc_state *crtc_state)
4452 {
4453 	struct drm_device *dev = crtc->base.dev;
4454 	struct drm_i915_private *dev_priv = to_i915(dev);
4455 	int pipe = crtc->pipe;
4456 	i915_reg_t reg;
4457 	u32 temp, tries;
4458 
4459 	/* FDI needs bits from pipe first */
4460 	assert_pipe_enabled(dev_priv, pipe);
4461 
	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
4464 	reg = FDI_RX_IMR(pipe);
4465 	temp = I915_READ(reg);
4466 	temp &= ~FDI_RX_SYMBOL_LOCK;
4467 	temp &= ~FDI_RX_BIT_LOCK;
4468 	I915_WRITE(reg, temp);
4469 	I915_READ(reg);
4470 	udelay(150);
4471 
4472 	/* enable CPU FDI TX and PCH FDI RX */
4473 	reg = FDI_TX_CTL(pipe);
4474 	temp = I915_READ(reg);
4475 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
4476 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4477 	temp &= ~FDI_LINK_TRAIN_NONE;
4478 	temp |= FDI_LINK_TRAIN_PATTERN_1;
4479 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
4480 
4481 	reg = FDI_RX_CTL(pipe);
4482 	temp = I915_READ(reg);
4483 	temp &= ~FDI_LINK_TRAIN_NONE;
4484 	temp |= FDI_LINK_TRAIN_PATTERN_1;
4485 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
4486 
4487 	POSTING_READ(reg);
4488 	udelay(150);
4489 
	/* Ironlake workaround, enable clock pointer after FDI enable */
4491 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4492 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4493 		   FDI_RX_PHASE_SYNC_POINTER_EN);
4494 
4495 	reg = FDI_RX_IIR(pipe);
4496 	for (tries = 0; tries < 5; tries++) {
4497 		temp = I915_READ(reg);
4498 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4499 
4500 		if ((temp & FDI_RX_BIT_LOCK)) {
4501 			DRM_DEBUG_KMS("FDI train 1 done.\n");
4502 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4503 			break;
4504 		}
4505 	}
4506 	if (tries == 5)
4507 		DRM_ERROR("FDI train 1 fail!\n");
4508 
4509 	/* Train 2 */
4510 	reg = FDI_TX_CTL(pipe);
4511 	temp = I915_READ(reg);
4512 	temp &= ~FDI_LINK_TRAIN_NONE;
4513 	temp |= FDI_LINK_TRAIN_PATTERN_2;
4514 	I915_WRITE(reg, temp);
4515 
4516 	reg = FDI_RX_CTL(pipe);
4517 	temp = I915_READ(reg);
4518 	temp &= ~FDI_LINK_TRAIN_NONE;
4519 	temp |= FDI_LINK_TRAIN_PATTERN_2;
4520 	I915_WRITE(reg, temp);
4521 
4522 	POSTING_READ(reg);
4523 	udelay(150);
4524 
4525 	reg = FDI_RX_IIR(pipe);
4526 	for (tries = 0; tries < 5; tries++) {
4527 		temp = I915_READ(reg);
4528 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4529 
4530 		if (temp & FDI_RX_SYMBOL_LOCK) {
4531 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4532 			DRM_DEBUG_KMS("FDI train 2 done.\n");
4533 			break;
4534 		}
4535 	}
4536 	if (tries == 5)
4537 		DRM_ERROR("FDI train 2 fail!\n");
4538 
	DRM_DEBUG_KMS("FDI train done.\n");
4541 }
4542 
4543 static const int snb_b_fdi_train_param[] = {
4544 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4545 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4546 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4547 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4548 };
4549 
4550 /* The FDI link training functions for SNB/Cougarpoint. */
4551 static void gen6_fdi_link_train(struct intel_crtc *crtc,
4552 				const struct intel_crtc_state *crtc_state)
4553 {
4554 	struct drm_device *dev = crtc->base.dev;
4555 	struct drm_i915_private *dev_priv = to_i915(dev);
4556 	int pipe = crtc->pipe;
4557 	i915_reg_t reg;
4558 	u32 temp, i, retry;
4559 
	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
4562 	reg = FDI_RX_IMR(pipe);
4563 	temp = I915_READ(reg);
4564 	temp &= ~FDI_RX_SYMBOL_LOCK;
4565 	temp &= ~FDI_RX_BIT_LOCK;
4566 	I915_WRITE(reg, temp);
4567 
4568 	POSTING_READ(reg);
4569 	udelay(150);
4570 
4571 	/* enable CPU FDI TX and PCH FDI RX */
4572 	reg = FDI_TX_CTL(pipe);
4573 	temp = I915_READ(reg);
4574 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
4575 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4576 	temp &= ~FDI_LINK_TRAIN_NONE;
4577 	temp |= FDI_LINK_TRAIN_PATTERN_1;
4578 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4579 	/* SNB-B */
4580 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4581 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
4582 
4583 	I915_WRITE(FDI_RX_MISC(pipe),
4584 		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4585 
4586 	reg = FDI_RX_CTL(pipe);
4587 	temp = I915_READ(reg);
4588 	if (HAS_PCH_CPT(dev_priv)) {
4589 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4590 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4591 	} else {
4592 		temp &= ~FDI_LINK_TRAIN_NONE;
4593 		temp |= FDI_LINK_TRAIN_PATTERN_1;
4594 	}
4595 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
4596 
4597 	POSTING_READ(reg);
4598 	udelay(150);
4599 
4600 	for (i = 0; i < 4; i++) {
4601 		reg = FDI_TX_CTL(pipe);
4602 		temp = I915_READ(reg);
4603 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4604 		temp |= snb_b_fdi_train_param[i];
4605 		I915_WRITE(reg, temp);
4606 
4607 		POSTING_READ(reg);
4608 		udelay(500);
4609 
4610 		for (retry = 0; retry < 5; retry++) {
4611 			reg = FDI_RX_IIR(pipe);
4612 			temp = I915_READ(reg);
4613 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4614 			if (temp & FDI_RX_BIT_LOCK) {
4615 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4616 				DRM_DEBUG_KMS("FDI train 1 done.\n");
4617 				break;
4618 			}
4619 			udelay(50);
4620 		}
4621 		if (retry < 5)
4622 			break;
4623 	}
4624 	if (i == 4)
4625 		DRM_ERROR("FDI train 1 fail!\n");
4626 
4627 	/* Train 2 */
4628 	reg = FDI_TX_CTL(pipe);
4629 	temp = I915_READ(reg);
4630 	temp &= ~FDI_LINK_TRAIN_NONE;
4631 	temp |= FDI_LINK_TRAIN_PATTERN_2;
4632 	if (IS_GEN(dev_priv, 6)) {
4633 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4634 		/* SNB-B */
4635 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4636 	}
4637 	I915_WRITE(reg, temp);
4638 
4639 	reg = FDI_RX_CTL(pipe);
4640 	temp = I915_READ(reg);
4641 	if (HAS_PCH_CPT(dev_priv)) {
4642 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4643 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4644 	} else {
4645 		temp &= ~FDI_LINK_TRAIN_NONE;
4646 		temp |= FDI_LINK_TRAIN_PATTERN_2;
4647 	}
4648 	I915_WRITE(reg, temp);
4649 
4650 	POSTING_READ(reg);
4651 	udelay(150);
4652 
4653 	for (i = 0; i < 4; i++) {
4654 		reg = FDI_TX_CTL(pipe);
4655 		temp = I915_READ(reg);
4656 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4657 		temp |= snb_b_fdi_train_param[i];
4658 		I915_WRITE(reg, temp);
4659 
4660 		POSTING_READ(reg);
4661 		udelay(500);
4662 
4663 		for (retry = 0; retry < 5; retry++) {
4664 			reg = FDI_RX_IIR(pipe);
4665 			temp = I915_READ(reg);
4666 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4667 			if (temp & FDI_RX_SYMBOL_LOCK) {
4668 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4669 				DRM_DEBUG_KMS("FDI train 2 done.\n");
4670 				break;
4671 			}
4672 			udelay(50);
4673 		}
4674 		if (retry < 5)
4675 			break;
4676 	}
4677 	if (i == 4)
4678 		DRM_ERROR("FDI train 2 fail!\n");
4679 
4680 	DRM_DEBUG_KMS("FDI train done.\n");
4681 }
4682 
4683 /* Manual link training for Ivy Bridge A0 parts */
4684 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4685 				      const struct intel_crtc_state *crtc_state)
4686 {
4687 	struct drm_device *dev = crtc->base.dev;
4688 	struct drm_i915_private *dev_priv = to_i915(dev);
4689 	int pipe = crtc->pipe;
4690 	i915_reg_t reg;
4691 	u32 temp, i, j;
4692 
	/*
	 * Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock
	 * bits for the train result.
	 */
4695 	reg = FDI_RX_IMR(pipe);
4696 	temp = I915_READ(reg);
4697 	temp &= ~FDI_RX_SYMBOL_LOCK;
4698 	temp &= ~FDI_RX_BIT_LOCK;
4699 	I915_WRITE(reg, temp);
4700 
4701 	POSTING_READ(reg);
4702 	udelay(150);
4703 
4704 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4705 		      I915_READ(FDI_RX_IIR(pipe)));
4706 
4707 	/* Try each vswing and preemphasis setting twice before moving on */
4708 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
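		/* j / 2 indexes the vswing table, giving each entry two tries */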
4709 		/* disable first in case we need to retry */
4710 		reg = FDI_TX_CTL(pipe);
4711 		temp = I915_READ(reg);
4712 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4713 		temp &= ~FDI_TX_ENABLE;
4714 		I915_WRITE(reg, temp);
4715 
4716 		reg = FDI_RX_CTL(pipe);
4717 		temp = I915_READ(reg);
4718 		temp &= ~FDI_LINK_TRAIN_AUTO;
4719 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4720 		temp &= ~FDI_RX_ENABLE;
4721 		I915_WRITE(reg, temp);
4722 
4723 		/* enable CPU FDI TX and PCH FDI RX */
4724 		reg = FDI_TX_CTL(pipe);
4725 		temp = I915_READ(reg);
4726 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
4727 		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4728 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
4729 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4730 		temp |= snb_b_fdi_train_param[j/2];
4731 		temp |= FDI_COMPOSITE_SYNC;
4732 		I915_WRITE(reg, temp | FDI_TX_ENABLE);
4733 
4734 		I915_WRITE(FDI_RX_MISC(pipe),
4735 			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4736 
4737 		reg = FDI_RX_CTL(pipe);
4738 		temp = I915_READ(reg);
4739 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4740 		temp |= FDI_COMPOSITE_SYNC;
4741 		I915_WRITE(reg, temp | FDI_RX_ENABLE);
4742 
4743 		POSTING_READ(reg);
4744 		udelay(1); /* should be 0.5us */
4745 
4746 		for (i = 0; i < 4; i++) {
4747 			reg = FDI_RX_IIR(pipe);
4748 			temp = I915_READ(reg);
4749 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4750 
4751 			if (temp & FDI_RX_BIT_LOCK ||
4752 			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4753 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4754 				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4755 					      i);
4756 				break;
4757 			}
4758 			udelay(1); /* should be 0.5us */
4759 		}
4760 		if (i == 4) {
4761 			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4762 			continue;
4763 		}
4764 
4765 		/* Train 2 */
4766 		reg = FDI_TX_CTL(pipe);
4767 		temp = I915_READ(reg);
4768 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4769 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4770 		I915_WRITE(reg, temp);
4771 
4772 		reg = FDI_RX_CTL(pipe);
4773 		temp = I915_READ(reg);
4774 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4775 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4776 		I915_WRITE(reg, temp);
4777 
4778 		POSTING_READ(reg);
4779 		udelay(2); /* should be 1.5us */
4780 
4781 		for (i = 0; i < 4; i++) {
4782 			reg = FDI_RX_IIR(pipe);
4783 			temp = I915_READ(reg);
4784 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4785 
4786 			if (temp & FDI_RX_SYMBOL_LOCK ||
4787 			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4788 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4789 				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4790 					      i);
4791 				goto train_done;
4792 			}
4793 			udelay(2); /* should be 1.5us */
4794 		}
4795 		if (i == 4)
4796 			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
4797 	}
4798 
4799 train_done:
4800 	DRM_DEBUG_KMS("FDI train done.\n");
4801 }
4802 
4803 static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
4804 {
4805 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4806 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4807 	int pipe = intel_crtc->pipe;
4808 	i915_reg_t reg;
4809 	u32 temp;
4810 
4811 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
4812 	reg = FDI_RX_CTL(pipe);
4813 	temp = I915_READ(reg);
4814 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
4815 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
4816 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4817 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4818 
4819 	POSTING_READ(reg);
4820 	udelay(200);
4821 
4822 	/* Switch from Rawclk to PCDclk */
4823 	temp = I915_READ(reg);
4824 	I915_WRITE(reg, temp | FDI_PCDCLK);
4825 
4826 	POSTING_READ(reg);
4827 	udelay(200);
4828 
4829 	/* Enable CPU FDI TX PLL, always on for Ironlake */
4830 	reg = FDI_TX_CTL(pipe);
4831 	temp = I915_READ(reg);
4832 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4833 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
4834 
4835 		POSTING_READ(reg);
4836 		udelay(100);
4837 	}
4838 }
4839 
4840 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4841 {
4842 	struct drm_device *dev = intel_crtc->base.dev;
4843 	struct drm_i915_private *dev_priv = to_i915(dev);
4844 	int pipe = intel_crtc->pipe;
4845 	i915_reg_t reg;
4846 	u32 temp;
4847 
4848 	/* Switch from PCDclk to Rawclk */
4849 	reg = FDI_RX_CTL(pipe);
4850 	temp = I915_READ(reg);
4851 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
4852 
4853 	/* Disable CPU FDI TX PLL */
4854 	reg = FDI_TX_CTL(pipe);
4855 	temp = I915_READ(reg);
4856 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4857 
4858 	POSTING_READ(reg);
4859 	udelay(100);
4860 
4861 	reg = FDI_RX_CTL(pipe);
4862 	temp = I915_READ(reg);
4863 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4864 
4865 	/* Wait for the clocks to turn off. */
4866 	POSTING_READ(reg);
4867 	udelay(100);
4868 }
4869 
4870 static void ironlake_fdi_disable(struct drm_crtc *crtc)
4871 {
4872 	struct drm_device *dev = crtc->dev;
4873 	struct drm_i915_private *dev_priv = to_i915(dev);
4874 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4875 	int pipe = intel_crtc->pipe;
4876 	i915_reg_t reg;
4877 	u32 temp;
4878 
4879 	/* disable CPU FDI tx and PCH FDI rx */
4880 	reg = FDI_TX_CTL(pipe);
4881 	temp = I915_READ(reg);
4882 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4883 	POSTING_READ(reg);
4884 
4885 	reg = FDI_RX_CTL(pipe);
4886 	temp = I915_READ(reg);
4887 	temp &= ~(0x7 << 16);
4888 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4889 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4890 
4891 	POSTING_READ(reg);
4892 	udelay(100);
4893 
4894 	/* Ironlake workaround, disable clock pointer after downing FDI */
4895 	if (HAS_PCH_IBX(dev_priv))
4896 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4897 
4898 	/* still set train pattern 1 */
4899 	reg = FDI_TX_CTL(pipe);
4900 	temp = I915_READ(reg);
4901 	temp &= ~FDI_LINK_TRAIN_NONE;
4902 	temp |= FDI_LINK_TRAIN_PATTERN_1;
4903 	I915_WRITE(reg, temp);
4904 
4905 	reg = FDI_RX_CTL(pipe);
4906 	temp = I915_READ(reg);
4907 	if (HAS_PCH_CPT(dev_priv)) {
4908 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4909 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4910 	} else {
4911 		temp &= ~FDI_LINK_TRAIN_NONE;
4912 		temp |= FDI_LINK_TRAIN_PATTERN_1;
4913 	}
	/* keep the BPC in FDI rx consistent with that in PIPECONF */
4915 	temp &= ~(0x07 << 16);
4916 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
4917 	I915_WRITE(reg, temp);
4918 
4919 	POSTING_READ(reg);
4920 	udelay(100);
4921 }
4922 
4923 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4924 {
4925 	struct drm_crtc *crtc;
4926 	bool cleanup_done;
4927 
4928 	drm_for_each_crtc(crtc, &dev_priv->drm) {
4929 		struct drm_crtc_commit *commit;

		spin_lock(&crtc->commit_lock);
4931 		commit = list_first_entry_or_null(&crtc->commit_list,
4932 						  struct drm_crtc_commit, commit_entry);
4933 		cleanup_done = commit ?
4934 			try_wait_for_completion(&commit->cleanup_done) : true;
4935 		spin_unlock(&crtc->commit_lock);
4936 
4937 		if (cleanup_done)
4938 			continue;
4939 
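		/*
		 * Give the pending commit one vblank to finish its
		 * cleanup before reporting the unpin as still pending.
		 */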
4940 		drm_crtc_wait_one_vblank(crtc);
4941 
4942 		return true;
4943 	}
4944 
4945 	return false;
4946 }
4947 
4948 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
4949 {
4950 	u32 temp;
4951 
4952 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4953 
4954 	mutex_lock(&dev_priv->sb_lock);
4955 
4956 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4957 	temp |= SBI_SSCCTL_DISABLE;
4958 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4959 
4960 	mutex_unlock(&dev_priv->sb_lock);
4961 }
4962 
4963 /* Program iCLKIP clock to the desired frequency */
4964 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
4965 {
4966 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4967 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4968 	int clock = crtc_state->base.adjusted_mode.crtc_clock;
4969 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
4970 	u32 temp;
4971 
4972 	lpt_disable_iclkip(dev_priv);
4973 
	/*
	 * The iCLK virtual clock root frequency is in MHz, but the
	 * adjusted_mode->crtc_clock is in kHz. To get the divisors,
	 * it is necessary to divide one by the other, so we convert
	 * the virtual clock root frequency to kHz here as well.
	 */
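	/*
	 * Worked example: for a 108 MHz pixel clock (clock = 108000)
	 * and auxdiv = 0, desired_divisor = 172800000 / 108000 = 1600,
	 * giving divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.
	 */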
4980 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
4981 		u32 iclk_virtual_root_freq = 172800 * 1000;
4982 		u32 iclk_pi_range = 64;
4983 		u32 desired_divisor;
4984 
4985 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4986 						    clock << auxdiv);
4987 		divsel = (desired_divisor / iclk_pi_range) - 2;
4988 		phaseinc = desired_divisor % iclk_pi_range;
4989 
4990 		/*
		 * Near 20 MHz is a corner case which is out of
		 * range for the 7-bit divisor.
4993 		 */
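		/*
		 * E.g. at clock = 20000 the divisor with auxdiv = 0 would
		 * be 172800000 / 20000 = 8640, i.e. divsel = 8640 / 64 - 2
		 * = 133 > 0x7f; auxdiv = 1 doubles the effective clock and
		 * brings divsel back into range.
		 */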
4994 		if (divsel <= 0x7f)
4995 			break;
4996 	}
4997 
4998 	/* This should not happen with any sane values */
4999 	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
5000 		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
5001 	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
5002 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
5003 
5004 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
5005 			clock,
5006 			auxdiv,
5007 			divsel,
5008 			phasedir,
5009 			phaseinc);
5010 
5011 	mutex_lock(&dev_priv->sb_lock);
5012 
5013 	/* Program SSCDIVINTPHASE6 */
5014 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5015 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5016 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5017 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5018 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5019 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5020 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
5021 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
5022 
5023 	/* Program SSCAUXDIV */
5024 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5025 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5026 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
5027 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
5028 
5029 	/* Enable modulator and associated divider */
5030 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5031 	temp &= ~SBI_SSCCTL_DISABLE;
5032 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
5033 
5034 	mutex_unlock(&dev_priv->sb_lock);
5035 
5036 	/* Wait for initialization time */
5037 	udelay(24);
5038 
5039 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5040 }
5041 
5042 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5043 {
5044 	u32 divsel, phaseinc, auxdiv;
5045 	u32 iclk_virtual_root_freq = 172800 * 1000;
5046 	u32 iclk_pi_range = 64;
5047 	u32 desired_divisor;
5048 	u32 temp;
5049 
5050 	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5051 		return 0;
5052 
5053 	mutex_lock(&dev_priv->sb_lock);
5054 
5055 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5056 	if (temp & SBI_SSCCTL_DISABLE) {
5057 		mutex_unlock(&dev_priv->sb_lock);
5058 		return 0;
5059 	}
5060 
5061 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5062 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5063 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5064 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5065 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5066 
5067 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5068 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5069 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5070 
5071 	mutex_unlock(&dev_priv->sb_lock);
5072 
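	/* Invert the divisor decomposition done in lpt_program_iclkip() */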
5073 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5074 
5075 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5076 				 desired_divisor << auxdiv);
5077 }
5078 
5079 static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
5080 						enum pipe pch_transcoder)
5081 {
5082 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5083 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5084 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5085 
5086 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5087 		   I915_READ(HTOTAL(cpu_transcoder)));
5088 	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5089 		   I915_READ(HBLANK(cpu_transcoder)));
5090 	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5091 		   I915_READ(HSYNC(cpu_transcoder)));
5092 
5093 	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5094 		   I915_READ(VTOTAL(cpu_transcoder)));
5095 	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5096 		   I915_READ(VBLANK(cpu_transcoder)));
5097 	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5098 		   I915_READ(VSYNC(cpu_transcoder)));
5099 	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5100 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
5101 }
5102 
5103 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
5104 {
5105 	u32 temp;
5106 
5107 	temp = I915_READ(SOUTH_CHICKEN1);
5108 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
5109 		return;
5110 
5111 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5112 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5113 
5114 	temp &= ~FDI_BC_BIFURCATION_SELECT;
5115 	if (enable)
5116 		temp |= FDI_BC_BIFURCATION_SELECT;
5117 
5118 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
5119 	I915_WRITE(SOUTH_CHICKEN1, temp);
5120 	POSTING_READ(SOUTH_CHICKEN1);
5121 }
5122 
5123 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5124 {
5125 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5126 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5127 
5128 	switch (crtc->pipe) {
5129 	case PIPE_A:
5130 		break;
5131 	case PIPE_B:
5132 		if (crtc_state->fdi_lanes > 2)
5133 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
5134 		else
5135 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
5136 
5137 		break;
5138 	case PIPE_C:
5139 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
5140 
5141 		break;
5142 	default:
5143 		BUG();
5144 	}
5145 }
5146 
5147 /*
5148  * Finds the encoder associated with the given CRTC. This can only be
5149  * used when we know that the CRTC isn't feeding multiple encoders!
5150  */
5151 static struct intel_encoder *
5152 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5153 			   const struct intel_crtc_state *crtc_state)
5154 {
5155 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5156 	const struct drm_connector_state *connector_state;
5157 	const struct drm_connector *connector;
5158 	struct intel_encoder *encoder = NULL;
5159 	int num_encoders = 0;
5160 	int i;
5161 
5162 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5163 		if (connector_state->crtc != &crtc->base)
5164 			continue;
5165 
5166 		encoder = to_intel_encoder(connector_state->best_encoder);
5167 		num_encoders++;
5168 	}
5169 
5170 	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5171 	     num_encoders, pipe_name(crtc->pipe));
5172 
5173 	return encoder;
5174 }
5175 
5176 /*
5177  * Enable PCH resources required for PCH ports:
5178  *   - PCH PLLs
5179  *   - FDI training & RX/TX
5180  *   - update transcoder timings
5181  *   - DP transcoding bits
5182  *   - transcoder
5183  */
5184 static void ironlake_pch_enable(const struct intel_atomic_state *state,
5185 				const struct intel_crtc_state *crtc_state)
5186 {
5187 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5188 	struct drm_device *dev = crtc->base.dev;
5189 	struct drm_i915_private *dev_priv = to_i915(dev);
5190 	int pipe = crtc->pipe;
5191 	u32 temp;
5192 
5193 	assert_pch_transcoder_disabled(dev_priv, pipe);
5194 
5195 	if (IS_IVYBRIDGE(dev_priv))
5196 		ivybridge_update_fdi_bc_bifurcation(crtc_state);
5197 
5198 	/* Write the TU size bits before fdi link training, so that error
5199 	 * detection works. */
5200 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
5201 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5202 
	/* For PCH output, train the FDI link */
5204 	dev_priv->display.fdi_link_train(crtc, crtc_state);
5205 
	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
5208 	if (HAS_PCH_CPT(dev_priv)) {
5209 		u32 sel;
5210 
5211 		temp = I915_READ(PCH_DPLL_SEL);
5212 		temp |= TRANS_DPLL_ENABLE(pipe);
5213 		sel = TRANS_DPLLB_SEL(pipe);
5214 		if (crtc_state->shared_dpll ==
5215 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5216 			temp |= sel;
5217 		else
5218 			temp &= ~sel;
5219 		I915_WRITE(PCH_DPLL_SEL, temp);
5220 	}
5221 
	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
5229 	intel_enable_shared_dpll(crtc_state);
5230 
5231 	/* set transcoder timing, panel must allow it */
5232 	assert_panel_unlocked(dev_priv, pipe);
5233 	ironlake_pch_transcoder_set_timings(crtc_state, pipe);
5234 
5235 	intel_fdi_normal_train(crtc);
5236 
5237 	/* For PCH DP, enable TRANS_DP_CTL */
5238 	if (HAS_PCH_CPT(dev_priv) &&
5239 	    intel_crtc_has_dp_encoder(crtc_state)) {
5240 		const struct drm_display_mode *adjusted_mode =
5241 			&crtc_state->base.adjusted_mode;
5242 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5243 		i915_reg_t reg = TRANS_DP_CTL(pipe);
5244 		enum port port;
5245 
5246 		temp = I915_READ(reg);
5247 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
5248 			  TRANS_DP_SYNC_MASK |
5249 			  TRANS_DP_BPC_MASK);
5250 		temp |= TRANS_DP_OUTPUT_ENABLE;
5251 		temp |= bpc << 9; /* same format but at 11:9 */
5252 
5253 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5254 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5255 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5256 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5257 
5258 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5259 		WARN_ON(port < PORT_B || port > PORT_D);
5260 		temp |= TRANS_DP_PORT_SEL(port);
5261 
5262 		I915_WRITE(reg, temp);
5263 	}
5264 
5265 	ironlake_enable_pch_transcoder(crtc_state);
5266 }
5267 
5268 static void lpt_pch_enable(const struct intel_atomic_state *state,
5269 			   const struct intel_crtc_state *crtc_state)
5270 {
5271 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5272 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5273 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5274 
5275 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
5276 
5277 	lpt_program_iclkip(crtc_state);
5278 
5279 	/* Set transcoder timing. */
5280 	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
5281 
5282 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
5283 }
5284 
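/*
 * Sanity check a CPT modeset: the scanline counter (PIPEDSL) should keep
 * advancing once the pipe is up. Sample it twice, with a retry, before
 * declaring the pipe stuck.
 */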
5285 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
5286 {
5287 	struct drm_i915_private *dev_priv = to_i915(dev);
5288 	i915_reg_t dslreg = PIPEDSL(pipe);
5289 	u32 temp;
5290 
5291 	temp = I915_READ(dslreg);
5292 	udelay(500);
5293 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
5294 		if (wait_for(I915_READ(dslreg) != temp, 5))
5295 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
5296 	}
5297 }
5298 
5299 /*
5300  * The hardware phase 0.0 refers to the center of the pixel.
5301  * We want to start from the top/left edge which is phase
5302  * -0.5. That matches how the hardware calculates the scaling
5303  * factors (from top-left of the first pixel to bottom-right
5304  * of the last pixel, as opposed to the pixel centers).
5305  *
5306  * For 4:2:0 subsampled chroma planes we obviously have to
5307  * adjust that so that the chroma sample position lands in
5308  * the right spot.
5309  *
5310  * Note that for packed YCbCr 4:2:2 formats there is no way to
5311  * control chroma siting. The hardware simply replicates the
5312  * chroma samples for both of the luma samples, and thus we don't
5313  * actually get the expected MPEG2 chroma siting convention :(
5314  * The same behaviour is observed on pre-SKL platforms as well.
5315  *
5316  * Theory behind the formula (note that we ignore sub-pixel
5317  * source coordinates):
5318  * s = source sample position
5319  * d = destination sample position
5320  *
5321  * Downscaling 4:1:
5322  * -0.5
5323  * | 0.0
5324  * | |     1.5 (initial phase)
5325  * | |     |
5326  * v v     v
5327  * | s | s | s | s |
5328  * |       d       |
5329  *
5330  * Upscaling 1:4:
5331  * -0.5
5332  * | -0.375 (initial phase)
5333  * | |     0.0
5334  * | |     |
5335  * v v     v
5336  * |       s       |
5337  * | d | d | d | d |
5338  */
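/*
 * Worked example (a sketch, assuming the .16 fixed point scale factor
 * passed in by the callers): a 4:1 downscale has scale = 0x40000, so for
 * non-cosited luma (sub = 1) the formula below yields
 * phase = -0x8000 + 0x40000 / 2 = 0x18000 (+1.5), while a 1:4 upscale
 * (scale = 0x4000) yields phase = -0x8000 + 0x2000 = -0x6000 (-0.375),
 * matching the two diagrams above.
 */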
5339 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5340 {
5341 	int phase = -0x8000;
5342 	u16 trip = 0;
5343 
5344 	if (chroma_cosited)
5345 		phase += (sub - 1) * 0x8000 / sub;
5346 
5347 	phase += scale / (2 * sub);
5348 
	/*
	 * Hardware initial phase limited to [-0.5:1.5].
	 * Since the max hardware scale factor is 3.0, we
	 * should never actually exceed 1.0 here.
	 */
5354 	WARN_ON(phase < -0x8000 || phase > 0x18000);
5355 
5356 	if (phase < 0)
5357 		phase = 0x10000 + phase;
5358 	else
5359 		trip = PS_PHASE_TRIP;
5360 
5361 	return ((phase >> 2) & PS_PHASE_MASK) | trip;
5362 }
5363 
5364 #define SKL_MIN_SRC_W 8
5365 #define SKL_MAX_SRC_W 4096
5366 #define SKL_MIN_SRC_H 8
5367 #define SKL_MAX_SRC_H 4096
5368 #define SKL_MIN_DST_W 8
5369 #define SKL_MAX_DST_W 4096
5370 #define SKL_MIN_DST_H 8
5371 #define SKL_MAX_DST_H 4096
5372 #define ICL_MAX_SRC_W 5120
5373 #define ICL_MAX_SRC_H 4096
5374 #define ICL_MAX_DST_W 5120
5375 #define ICL_MAX_DST_H 4096
5376 #define SKL_MIN_YUV_420_SRC_W 16
5377 #define SKL_MIN_YUV_420_SRC_H 16
5378 
5379 static int
5380 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5381 		  unsigned int scaler_user, int *scaler_id,
5382 		  int src_w, int src_h, int dst_w, int dst_h,
5383 		  const struct drm_format_info *format, bool need_scaler)
5384 {
5385 	struct intel_crtc_scaler_state *scaler_state =
5386 		&crtc_state->scaler_state;
5387 	struct intel_crtc *intel_crtc =
5388 		to_intel_crtc(crtc_state->base.crtc);
5389 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5390 	const struct drm_display_mode *adjusted_mode =
5391 		&crtc_state->base.adjusted_mode;
5392 
5393 	/*
5394 	 * Src coordinates are already rotated by 270 degrees for
5395 	 * the 90/270 degree plane rotation cases (to match the
5396 	 * GTT mapping), hence no need to account for rotation here.
5397 	 */
5398 	if (src_w != dst_w || src_h != dst_h)
5399 		need_scaler = true;
5400 
5401 	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+.
5403 	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5404 	 * Once NV12 is enabled, handle it here while allocating scaler
5405 	 * for NV12.
5406 	 */
5407 	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5408 	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5409 		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5410 		return -EINVAL;
5411 	}
5412 
	/*
	 * If the plane is being disabled, the scaler is no longer required,
	 * or a detach is forced:
	 *  - free the scaler bound to this plane/crtc
	 *  - to do this, update crtc_state->scaler_state.scaler_users
	 *
	 * Here the scaler state in crtc_state is marked free so that the
	 * scaler can be assigned to another user. The actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For that purpose crtc/plane_state->scaler_id isn't reset here.
	 */
5423 	if (force_detach || !need_scaler) {
5424 		if (*scaler_id >= 0) {
5425 			scaler_state->scaler_users &= ~(1 << scaler_user);
5426 			scaler_state->scalers[*scaler_id].in_use = 0;
5427 
5428 			DRM_DEBUG_KMS("scaler_user index %u.%u: "
5429 				"Staged freeing scaler id %d scaler_users = 0x%x\n",
5430 				intel_crtc->pipe, scaler_user, *scaler_id,
5431 				scaler_state->scaler_users);
5432 			*scaler_id = -1;
5433 		}
5434 		return 0;
5435 	}
5436 
5437 	if (format && is_planar_yuv_format(format->format) &&
5438 	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5439 		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5440 		return -EINVAL;
5441 	}
5442 
5443 	/* range checks */
5444 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5445 	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5446 	    (INTEL_GEN(dev_priv) >= 11 &&
5447 	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5448 	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5449 	    (INTEL_GEN(dev_priv) < 11 &&
5450 	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5451 	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
5452 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5453 			"size is out of scaler range\n",
5454 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5455 		return -EINVAL;
5456 	}
5457 
5458 	/* mark this plane as a scaler user in crtc_state */
5459 	scaler_state->scaler_users |= (1 << scaler_user);
5460 	DRM_DEBUG_KMS("scaler_user index %u.%u: "
5461 		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5462 		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5463 		scaler_state->scaler_users);
5464 
5465 	return 0;
5466 }
5467 
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 * @state: crtc state to stage the scaler update for
 *
 * Return:
 *     0 - scaler_users updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
5477 int skl_update_scaler_crtc(struct intel_crtc_state *state)
5478 {
5479 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
5480 	bool need_scaler = false;
5481 
5482 	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5483 		need_scaler = true;
5484 
5485 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
5486 				 &state->scaler_state.scaler_id,
5487 				 state->pipe_src_w, state->pipe_src_h,
5488 				 adjusted_mode->crtc_hdisplay,
5489 				 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
5490 }
5491 
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc state containing the scaler state to update
 * @plane_state: atomic plane state to update
 *
 * Return:
 *     0 - scaler_users updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
5501 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5502 				   struct intel_plane_state *plane_state)
5503 {
5504 	struct intel_plane *intel_plane =
5505 		to_intel_plane(plane_state->base.plane);
5506 	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
5507 	struct drm_framebuffer *fb = plane_state->base.fb;
5508 	int ret;
5509 	bool force_detach = !fb || !plane_state->base.visible;
5510 	bool need_scaler = false;
5511 
	/*
	 * Pre-gen11 planes, and gen11 SDR planes, always need a scaler
	 * for planar formats.
	 */
5513 	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
5514 	    fb && is_planar_yuv_format(fb->format->format))
5515 		need_scaler = true;
5516 
5517 	ret = skl_update_scaler(crtc_state, force_detach,
5518 				drm_plane_index(&intel_plane->base),
5519 				&plane_state->scaler_id,
5520 				drm_rect_width(&plane_state->base.src) >> 16,
5521 				drm_rect_height(&plane_state->base.src) >> 16,
5522 				drm_rect_width(&plane_state->base.dst),
5523 				drm_rect_height(&plane_state->base.dst),
5524 				fb ? fb->format : NULL, need_scaler);
5525 
5526 	if (ret || plane_state->scaler_id < 0)
5527 		return ret;
5528 
5529 	/* check colorkey */
5530 	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed\n",
5532 			      intel_plane->base.base.id,
5533 			      intel_plane->base.name);
5534 		return -EINVAL;
5535 	}
5536 
5537 	/* Check src format */
5538 	switch (fb->format->format) {
5539 	case DRM_FORMAT_RGB565:
5540 	case DRM_FORMAT_XBGR8888:
5541 	case DRM_FORMAT_XRGB8888:
5542 	case DRM_FORMAT_ABGR8888:
5543 	case DRM_FORMAT_ARGB8888:
5544 	case DRM_FORMAT_XRGB2101010:
5545 	case DRM_FORMAT_XBGR2101010:
5546 	case DRM_FORMAT_XBGR16161616F:
5547 	case DRM_FORMAT_ABGR16161616F:
5548 	case DRM_FORMAT_XRGB16161616F:
5549 	case DRM_FORMAT_ARGB16161616F:
5550 	case DRM_FORMAT_YUYV:
5551 	case DRM_FORMAT_YVYU:
5552 	case DRM_FORMAT_UYVY:
5553 	case DRM_FORMAT_VYUY:
5554 	case DRM_FORMAT_NV12:
5555 	case DRM_FORMAT_P010:
5556 	case DRM_FORMAT_P012:
5557 	case DRM_FORMAT_P016:
5558 	case DRM_FORMAT_Y210:
5559 	case DRM_FORMAT_Y212:
5560 	case DRM_FORMAT_Y216:
5561 	case DRM_FORMAT_XVYU2101010:
5562 	case DRM_FORMAT_XVYU12_16161616:
5563 	case DRM_FORMAT_XVYU16161616:
5564 		break;
5565 	default:
5566 		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5567 			      intel_plane->base.base.id, intel_plane->base.name,
5568 			      fb->base.id, fb->format->format);
5569 		return -EINVAL;
5570 	}
5571 
5572 	return 0;
5573 }
5574 
5575 static void skylake_scaler_disable(struct intel_crtc *crtc)
5576 {
5577 	int i;
5578 
5579 	for (i = 0; i < crtc->num_scalers; i++)
5580 		skl_detach_scaler(crtc, i);
5581 }
5582 
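/*
 * Program a pipe scaler as the panel fitter: derive the scale factors
 * from the pipe source size vs. the pfit window, compute the matching
 * initial phases and write out the scaler control, phase, position and
 * size registers.
 */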
5583 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
5584 {
5585 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5586 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5587 	enum pipe pipe = crtc->pipe;
5588 	const struct intel_crtc_scaler_state *scaler_state =
5589 		&crtc_state->scaler_state;
5590 
5591 	if (crtc_state->pch_pfit.enabled) {
5592 		u16 uv_rgb_hphase, uv_rgb_vphase;
5593 		int pfit_w, pfit_h, hscale, vscale;
5594 		int id;
5595 
5596 		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
5597 			return;
5598 
5599 		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5600 		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5601 
5602 		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5603 		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5604 
5605 		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5606 		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
5607 
5608 		id = scaler_state->scaler_id;
5609 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5610 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
5611 		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5612 			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5613 		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5614 			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
5615 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5616 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
5617 	}
5618 }
5619 
5620 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
5621 {
5622 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5623 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5624 	int pipe = crtc->pipe;
5625 
5626 	if (crtc_state->pch_pfit.enabled) {
5627 		/* Force use of hard-coded filter coefficients
5628 		 * as some pre-programmed values are broken,
5629 		 * e.g. x201.
5630 		 */
5631 		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5632 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5633 						 PF_PIPE_SEL_IVB(pipe));
5634 		else
5635 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5636 		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5637 		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
5638 	}
5639 }
5640 
5641 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
5642 {
5643 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5644 	struct drm_device *dev = crtc->base.dev;
5645 	struct drm_i915_private *dev_priv = to_i915(dev);
5646 
5647 	if (!crtc_state->ips_enabled)
5648 		return;
5649 
5650 	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
5652 	 * This function is called from post_plane_update, which is run after
5653 	 * a vblank wait.
5654 	 */
5655 	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
5656 
5657 	if (IS_BROADWELL(dev_priv)) {
5658 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5659 						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
5665 	} else {
5666 		I915_WRITE(IPS_CTL, IPS_ENABLE);
5667 		/* The bit only becomes 1 in the next vblank, so this wait here
5668 		 * is essentially intel_wait_for_vblank. If we don't have this
5669 		 * and don't wait for vblanks until the end of crtc_enable, then
5670 		 * the HW state readout code will complain that the expected
5671 		 * IPS_CTL value is not the one we read. */
5672 		if (intel_wait_for_register(&dev_priv->uncore,
5673 					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5674 					    50))
5675 			DRM_ERROR("Timed out waiting for IPS enable\n");
5676 	}
5677 }
5678 
5679 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5680 {
5681 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5682 	struct drm_device *dev = crtc->base.dev;
5683 	struct drm_i915_private *dev_priv = to_i915(dev);
5684 
5685 	if (!crtc_state->ips_enabled)
5686 		return;
5687 
5688 	if (IS_BROADWELL(dev_priv)) {
5689 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5690 		/*
5691 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
5692 		 * 42ms timeout value leads to occasional timeouts so use 100ms
5693 		 * instead.
5694 		 */
5695 		if (intel_wait_for_register(&dev_priv->uncore,
5696 					    IPS_CTL, IPS_ENABLE, 0,
5697 					    100))
5698 			DRM_ERROR("Timed out waiting for IPS disable\n");
5699 	} else {
5700 		I915_WRITE(IPS_CTL, 0);
5701 		POSTING_READ(IPS_CTL);
5702 	}
5703 
5704 	/* We need to wait for a vblank before we can disable the plane. */
5705 	intel_wait_for_vblank(dev_priv, crtc->pipe);
5706 }
5707 
5708 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5709 {
5710 	if (intel_crtc->overlay) {
5711 		struct drm_device *dev = intel_crtc->base.dev;
5712 
5713 		mutex_lock(&dev->struct_mutex);
5714 		(void) intel_overlay_switch_off(intel_crtc->overlay);
5715 		mutex_unlock(&dev->struct_mutex);
5716 	}
5717 
5718 	/* Let userspace switch the overlay on again. In most cases userspace
5719 	 * has to recompute where to put it anyway.
5720 	 */
5721 }
5722 
5723 /**
5724  * intel_post_enable_primary - Perform operations after enabling primary plane
5725  * @crtc: the CRTC whose primary plane was just enabled
5726  * @new_crtc_state: the enabling state
5727  *
5728  * Performs potentially sleeping operations that must be done after the primary
5729  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5730  * called due to an explicit primary plane update, or due to an implicit
5731  * re-enable that is caused when a sprite plane is updated to no longer
5732  * completely hide the primary plane.
5733  */
5734 static void
5735 intel_post_enable_primary(struct drm_crtc *crtc,
5736 			  const struct intel_crtc_state *new_crtc_state)
5737 {
5738 	struct drm_device *dev = crtc->dev;
5739 	struct drm_i915_private *dev_priv = to_i915(dev);
5740 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5741 	int pipe = intel_crtc->pipe;
5742 
5743 	/*
5744 	 * Gen2 reports pipe underruns whenever all planes are disabled.
5745 	 * So don't enable underrun reporting before at least some planes
5746 	 * are enabled.
5747 	 * FIXME: Need to fix the logic to work when we turn off all planes
5748 	 * but leave the pipe running.
5749 	 */
5750 	if (IS_GEN(dev_priv, 2))
5751 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5752 
5753 	/* Underruns don't always raise interrupts, so check manually. */
5754 	intel_check_cpu_fifo_underruns(dev_priv);
5755 	intel_check_pch_fifo_underruns(dev_priv);
5756 }
5757 
5758 /* FIXME get rid of this and use pre_plane_update */
5759 static void
5760 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5761 {
5762 	struct drm_device *dev = crtc->dev;
5763 	struct drm_i915_private *dev_priv = to_i915(dev);
5764 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5765 	int pipe = intel_crtc->pipe;
5766 
5767 	/*
5768 	 * Gen2 reports pipe underruns whenever all planes are disabled.
5769 	 * So disable underrun reporting before all the planes get disabled.
5770 	 */
5771 	if (IS_GEN(dev_priv, 2))
5772 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5773 
5774 	hsw_disable_ips(to_intel_crtc_state(crtc->state));
5775 
5776 	/*
5777 	 * Vblank time updates from the shadow to live plane control register
5778 	 * are blocked if the memory self-refresh mode is active at that
5779 	 * moment. So to make sure the plane gets truly disabled, disable
5780 	 * first the self-refresh mode. The self-refresh enable bit in turn
5781 	 * will be checked/applied by the HW only at the next frame start
5782 	 * event which is after the vblank start event, so we need to have a
5783 	 * wait-for-vblank between disabling the plane and the pipe.
5784 	 */
5785 	if (HAS_GMCH(dev_priv) &&
5786 	    intel_set_memory_cxsr(dev_priv, false))
5787 		intel_wait_for_vblank(dev_priv, pipe);
5788 }
5789 
5790 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5791 				       const struct intel_crtc_state *new_crtc_state)
5792 {
5793 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5794 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5795 
5796 	if (!old_crtc_state->ips_enabled)
5797 		return false;
5798 
5799 	if (needs_modeset(&new_crtc_state->base))
5800 		return true;
5801 
5802 	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
5804 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5805 	 *
5806 	 * Disable IPS before we program the LUT.
5807 	 */
5808 	if (IS_HASWELL(dev_priv) &&
5809 	    (new_crtc_state->base.color_mgmt_changed ||
5810 	     new_crtc_state->update_pipe) &&
5811 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5812 		return true;
5813 
5814 	return !new_crtc_state->ips_enabled;
5815 }
5816 
5817 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5818 				       const struct intel_crtc_state *new_crtc_state)
5819 {
5820 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5821 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5822 
5823 	if (!new_crtc_state->ips_enabled)
5824 		return false;
5825 
5826 	if (needs_modeset(&new_crtc_state->base))
5827 		return true;
5828 
5829 	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
5831 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5832 	 *
5833 	 * Re-enable IPS after the LUT has been programmed.
5834 	 */
5835 	if (IS_HASWELL(dev_priv) &&
5836 	    (new_crtc_state->base.color_mgmt_changed ||
5837 	     new_crtc_state->update_pipe) &&
5838 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5839 		return true;
5840 
5841 	/*
	 * We can't read out IPS on Broadwell, so assume the worst and
	 * forcibly enable IPS on the first fastset.
5844 	 */
5845 	if (new_crtc_state->update_pipe &&
5846 	    old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5847 		return true;
5848 
5849 	return !old_crtc_state->ips_enabled;
5850 }
5851 
5852 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5853 			  const struct intel_crtc_state *crtc_state)
5854 {
5855 	if (!crtc_state->nv12_planes)
5856 		return false;
5857 
5858 	/* WA Display #0827: Gen9:all */
5859 	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5860 		return true;
5861 
5862 	return false;
5863 }
5864 
5865 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5866 			       const struct intel_crtc_state *crtc_state)
5867 {
5868 	/* Wa_2006604312:icl */
5869 	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5870 		return true;
5871 
5872 	return false;
5873 }
5874 
5875 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5876 {
5877 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5878 	struct drm_device *dev = crtc->base.dev;
5879 	struct drm_i915_private *dev_priv = to_i915(dev);
5880 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
5881 	struct intel_crtc_state *pipe_config =
5882 		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5883 						crtc);
5884 	struct drm_plane *primary = crtc->base.primary;
5885 	struct drm_plane_state *old_primary_state =
5886 		drm_atomic_get_old_plane_state(old_state, primary);
5887 
5888 	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5889 
5890 	if (pipe_config->update_wm_post && pipe_config->base.active)
5891 		intel_update_watermarks(crtc);
5892 
5893 	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5894 		hsw_enable_ips(pipe_config);
5895 
5896 	if (old_primary_state) {
5897 		struct drm_plane_state *new_primary_state =
5898 			drm_atomic_get_new_plane_state(old_state, primary);
5899 
5900 		intel_fbc_post_update(crtc);
5901 
5902 		if (new_primary_state->visible &&
5903 		    (needs_modeset(&pipe_config->base) ||
5904 		     !old_primary_state->visible))
5905 			intel_post_enable_primary(&crtc->base, pipe_config);
5906 	}
5907 
5908 	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
5909 	    !needs_nv12_wa(dev_priv, pipe_config))
5910 		skl_wa_827(dev_priv, crtc->pipe, false);
5911 
5912 	if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5913 	    !needs_scalerclk_wa(dev_priv, pipe_config))
5914 		icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
5915 }
5916 
5917 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5918 				   struct intel_crtc_state *pipe_config)
5919 {
5920 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5921 	struct drm_device *dev = crtc->base.dev;
5922 	struct drm_i915_private *dev_priv = to_i915(dev);
5923 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
5924 	struct drm_plane *primary = crtc->base.primary;
5925 	struct drm_plane_state *old_primary_state =
5926 		drm_atomic_get_old_plane_state(old_state, primary);
5927 	bool modeset = needs_modeset(&pipe_config->base);
5928 	struct intel_atomic_state *old_intel_state =
5929 		to_intel_atomic_state(old_state);
5930 
5931 	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5932 		hsw_disable_ips(old_crtc_state);
5933 
5934 	if (old_primary_state) {
5935 		struct intel_plane_state *new_primary_state =
5936 			intel_atomic_get_new_plane_state(old_intel_state,
5937 							 to_intel_plane(primary));
5938 
5939 		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
5940 		/*
5941 		 * Gen2 reports pipe underruns whenever all planes are disabled.
5942 		 * So disable underrun reporting before all the planes get disabled.
5943 		 */
5944 		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
5945 		    (modeset || !new_primary_state->base.visible))
5946 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5947 	}
5948 
5949 	/* Display WA 827 */
5950 	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
5951 	    needs_nv12_wa(dev_priv, pipe_config))
5952 		skl_wa_827(dev_priv, crtc->pipe, true);
5953 
5954 	/* Wa_2006604312:icl */
5955 	if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5956 	    needs_scalerclk_wa(dev_priv, pipe_config))
5957 		icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
5958 
5959 	/*
5960 	 * Vblank time updates from the shadow to live plane control register
5961 	 * are blocked if the memory self-refresh mode is active at that
5962 	 * moment. So to make sure the plane gets truly disabled, disable
5963 	 * first the self-refresh mode. The self-refresh enable bit in turn
5964 	 * will be checked/applied by the HW only at the next frame start
5965 	 * event which is after the vblank start event, so we need to have a
5966 	 * wait-for-vblank between disabling the plane and the pipe.
5967 	 */
5968 	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5969 	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5970 		intel_wait_for_vblank(dev_priv, crtc->pipe);
5971 
5972 	/*
5973 	 * IVB workaround: must disable low power watermarks for at least
5974 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
5975 	 * when scaling is disabled.
5976 	 *
5977 	 * WaCxSRDisabledForSpriteScaling:ivb
5978 	 */
5979 	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5980 	    old_crtc_state->base.active)
5981 		intel_wait_for_vblank(dev_priv, crtc->pipe);
5982 
5983 	/*
5984 	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
5985 	 * watermark programming here.
5986 	 */
5987 	if (needs_modeset(&pipe_config->base))
5988 		return;
5989 
5990 	/*
5991 	 * For platforms that support atomic watermarks, program the
5992 	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
5993 	 * will be the intermediate values that are safe for both pre- and
5994 	 * post- vblank; when vblank happens, the 'active' values will be set
5995 	 * to the final 'target' values and we'll do this again to get the
5996 	 * optimal watermarks.  For gen9+ platforms, the values we program here
5997 	 * will be the final target values which will get automatically latched
5998 	 * at vblank time; no further programming will be necessary.
5999 	 *
6000 	 * If a platform hasn't been transitioned to atomic watermarks yet,
6001 	 * we'll continue to update watermarks the old way, if flags tell
6002 	 * us to.
6003 	 */
6004 	if (dev_priv->display.initial_watermarks != NULL)
6005 		dev_priv->display.initial_watermarks(old_intel_state,
6006 						     pipe_config);
6007 	else if (pipe_config->update_wm_pre)
6008 		intel_update_watermarks(crtc);
6009 }
6010 
6011 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6012 				      struct intel_crtc *crtc)
6013 {
6014 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6015 	const struct intel_crtc_state *new_crtc_state =
6016 		intel_atomic_get_new_crtc_state(state, crtc);
6017 	unsigned int update_mask = new_crtc_state->update_planes;
6018 	const struct intel_plane_state *old_plane_state;
6019 	struct intel_plane *plane;
	unsigned int fb_bits = 0;
6021 	int i;
6022 
6023 	intel_crtc_dpms_overlay_disable(crtc);
6024 
6025 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6026 		if (crtc->pipe != plane->pipe ||
6027 		    !(update_mask & BIT(plane->id)))
6028 			continue;
6029 
6030 		intel_disable_plane(plane, new_crtc_state);
6031 
6032 		if (old_plane_state->base.visible)
6033 			fb_bits |= plane->frontbuffer_bit;
6034 	}
6035 
6036 	intel_frontbuffer_flip(dev_priv, fb_bits);
6037 }
6038 
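/*
 * The intel_encoders_*() helpers below walk the connectors in the atomic
 * state and invoke the corresponding encoder hook for every encoder
 * attached to the given crtc.
 */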
6039 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
6040 					  struct intel_crtc_state *crtc_state,
6041 					  struct drm_atomic_state *old_state)
6042 {
6043 	struct drm_connector_state *conn_state;
6044 	struct drm_connector *conn;
6045 	int i;
6046 
6047 	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6048 		struct intel_encoder *encoder =
6049 			to_intel_encoder(conn_state->best_encoder);
6050 
6051 		if (conn_state->crtc != crtc)
6052 			continue;
6053 
6054 		if (encoder->pre_pll_enable)
6055 			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6056 	}
6057 }
6058 
6059 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
6060 				      struct intel_crtc_state *crtc_state,
6061 				      struct drm_atomic_state *old_state)
6062 {
6063 	struct drm_connector_state *conn_state;
6064 	struct drm_connector *conn;
6065 	int i;
6066 
6067 	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6068 		struct intel_encoder *encoder =
6069 			to_intel_encoder(conn_state->best_encoder);
6070 
6071 		if (conn_state->crtc != crtc)
6072 			continue;
6073 
6074 		if (encoder->pre_enable)
6075 			encoder->pre_enable(encoder, crtc_state, conn_state);
6076 	}
6077 }
6078 
6079 static void intel_encoders_enable(struct drm_crtc *crtc,
6080 				  struct intel_crtc_state *crtc_state,
6081 				  struct drm_atomic_state *old_state)
6082 {
6083 	struct drm_connector_state *conn_state;
6084 	struct drm_connector *conn;
6085 	int i;
6086 
6087 	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6088 		struct intel_encoder *encoder =
6089 			to_intel_encoder(conn_state->best_encoder);
6090 
6091 		if (conn_state->crtc != crtc)
6092 			continue;
6093 
6094 		if (encoder->enable)
6095 			encoder->enable(encoder, crtc_state, conn_state);
6096 		intel_opregion_notify_encoder(encoder, true);
6097 	}
6098 }
6099 
6100 static void intel_encoders_disable(struct drm_crtc *crtc,
6101 				   struct intel_crtc_state *old_crtc_state,
6102 				   struct drm_atomic_state *old_state)
6103 {
6104 	struct drm_connector_state *old_conn_state;
6105 	struct drm_connector *conn;
6106 	int i;
6107 
6108 	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6109 		struct intel_encoder *encoder =
6110 			to_intel_encoder(old_conn_state->best_encoder);
6111 
6112 		if (old_conn_state->crtc != crtc)
6113 			continue;
6114 
6115 		intel_opregion_notify_encoder(encoder, false);
6116 		if (encoder->disable)
6117 			encoder->disable(encoder, old_crtc_state, old_conn_state);
6118 	}
6119 }
6120 
6121 static void intel_encoders_post_disable(struct drm_crtc *crtc,
6122 					struct intel_crtc_state *old_crtc_state,
6123 					struct drm_atomic_state *old_state)
6124 {
6125 	struct drm_connector_state *old_conn_state;
6126 	struct drm_connector *conn;
6127 	int i;
6128 
6129 	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6130 		struct intel_encoder *encoder =
6131 			to_intel_encoder(old_conn_state->best_encoder);
6132 
6133 		if (old_conn_state->crtc != crtc)
6134 			continue;
6135 
6136 		if (encoder->post_disable)
6137 			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6138 	}
6139 }
6140 
6141 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
6142 					    struct intel_crtc_state *old_crtc_state,
6143 					    struct drm_atomic_state *old_state)
6144 {
6145 	struct drm_connector_state *old_conn_state;
6146 	struct drm_connector *conn;
6147 	int i;
6148 
6149 	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
6150 		struct intel_encoder *encoder =
6151 			to_intel_encoder(old_conn_state->best_encoder);
6152 
6153 		if (old_conn_state->crtc != crtc)
6154 			continue;
6155 
6156 		if (encoder->post_pll_disable)
6157 			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6158 	}
6159 }
6160 
6161 static void intel_encoders_update_pipe(struct drm_crtc *crtc,
6162 				       struct intel_crtc_state *crtc_state,
6163 				       struct drm_atomic_state *old_state)
6164 {
6165 	struct drm_connector_state *conn_state;
6166 	struct drm_connector *conn;
6167 	int i;
6168 
6169 	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6170 		struct intel_encoder *encoder =
6171 			to_intel_encoder(conn_state->best_encoder);
6172 
6173 		if (conn_state->crtc != crtc)
6174 			continue;
6175 
6176 		if (encoder->update_pipe)
6177 			encoder->update_pipe(encoder, crtc_state, conn_state);
6178 	}
6179 }
6180 
6181 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6182 {
6183 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6184 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6185 
6186 	plane->disable_plane(plane, crtc_state);
6187 }
6188 
6189 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6190 				 struct drm_atomic_state *old_state)
6191 {
6192 	struct drm_crtc *crtc = pipe_config->base.crtc;
6193 	struct drm_device *dev = crtc->dev;
6194 	struct drm_i915_private *dev_priv = to_i915(dev);
6195 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6196 	int pipe = intel_crtc->pipe;
6197 	struct intel_atomic_state *old_intel_state =
6198 		to_intel_atomic_state(old_state);
6199 
6200 	if (WARN_ON(intel_crtc->active))
6201 		return;
6202 
6203 	/*
6204 	 * Sometimes spurious CPU pipe underruns happen during FDI
6205 	 * training, at least with VGA+HDMI cloning. Suppress them.
6206 	 *
	 * On ILK we get occasional spurious CPU pipe underruns
6208 	 * between eDP port A enable and vdd enable. Also PCH port
6209 	 * enable seems to result in the occasional CPU pipe underrun.
6210 	 *
6211 	 * Spurious PCH underruns also occur during PCH enabling.
6212 	 */
6213 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6214 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6215 
6216 	if (pipe_config->has_pch_encoder)
6217 		intel_prepare_shared_dpll(pipe_config);
6218 
6219 	if (intel_crtc_has_dp_encoder(pipe_config))
6220 		intel_dp_set_m_n(pipe_config, M1_N1);
6221 
6222 	intel_set_pipe_timings(pipe_config);
6223 	intel_set_pipe_src_size(pipe_config);
6224 
6225 	if (pipe_config->has_pch_encoder) {
6226 		intel_cpu_transcoder_set_m_n(pipe_config,
6227 					     &pipe_config->fdi_m_n, NULL);
6228 	}
6229 
6230 	ironlake_set_pipeconf(pipe_config);
6231 
6232 	intel_crtc->active = true;
6233 
6234 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
6235 
6236 	if (pipe_config->has_pch_encoder) {
6237 		/* Note: FDI PLL enabling _must_ be done before we enable the
6238 		 * cpu pipes, hence this is separate from all the other fdi/pch
6239 		 * enabling. */
6240 		ironlake_fdi_pll_enable(pipe_config);
6241 	} else {
6242 		assert_fdi_tx_disabled(dev_priv, pipe);
6243 		assert_fdi_rx_disabled(dev_priv, pipe);
6244 	}
6245 
6246 	ironlake_pfit_enable(pipe_config);
6247 
6248 	/*
	 * On ILK+ the LUT must be loaded before the pipe is running but with
	 * clocks enabled.
6251 	 */
6252 	intel_color_load_luts(pipe_config);
6253 	intel_color_commit(pipe_config);
6254 	/* update DSPCNTR to configure gamma for pipe bottom color */
6255 	intel_disable_primary_plane(pipe_config);
6256 
6257 	if (dev_priv->display.initial_watermarks != NULL)
6258 		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6259 	intel_enable_pipe(pipe_config);
6260 
6261 	if (pipe_config->has_pch_encoder)
6262 		ironlake_pch_enable(old_intel_state, pipe_config);
6263 
6264 	assert_vblank_disabled(crtc);
6265 	intel_crtc_vblank_on(pipe_config);
6266 
6267 	intel_encoders_enable(crtc, pipe_config, old_state);
6268 
6269 	if (HAS_PCH_CPT(dev_priv))
6270 		cpt_verify_modeset(dev, intel_crtc->pipe);
6271 
6272 	/*
6273 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6274 	 * And a second vblank wait is needed at least on ILK with
6275 	 * some interlaced HDMI modes. Let's do the double wait always
6276 	 * in case there are more corner cases we don't know about.
6277 	 */
6278 	if (pipe_config->has_pch_encoder) {
6279 		intel_wait_for_vblank(dev_priv, pipe);
6280 		intel_wait_for_vblank(dev_priv, pipe);
6281 	}
6282 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6283 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6284 }
6285 
6286 /* IPS only exists on ULT machines and is tied to pipe A. */
6287 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6288 {
6289 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6290 }
6291 
6292 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6293 					    enum pipe pipe, bool apply)
6294 {
6295 	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6296 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6297 
6298 	if (apply)
6299 		val |= mask;
6300 	else
6301 		val &= ~mask;
6302 
6303 	I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6304 }
6305 
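/*
 * Program the per-pipe MBus data box credits; the particular credit
 * values below are assumed to follow the recommended ICL settings.
 */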
6306 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6307 {
6308 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6309 	enum pipe pipe = crtc->pipe;
6310 	u32 val;
6311 
6312 	val = MBUS_DBOX_A_CREDIT(2);
6313 	val |= MBUS_DBOX_BW_CREDIT(1);
6314 	val |= MBUS_DBOX_B_CREDIT(8);
6315 
6316 	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6317 }
6318 
6319 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6320 				struct drm_atomic_state *old_state)
6321 {
6322 	struct drm_crtc *crtc = pipe_config->base.crtc;
6323 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6324 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6325 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
6326 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6327 	struct intel_atomic_state *old_intel_state =
6328 		to_intel_atomic_state(old_state);
6329 	bool psl_clkgate_wa;
6330 
6331 	if (WARN_ON(intel_crtc->active))
6332 		return;
6333 
6334 	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6335 
6336 	if (pipe_config->shared_dpll)
6337 		intel_enable_shared_dpll(pipe_config);
6338 
6339 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
6340 
6341 	if (intel_crtc_has_dp_encoder(pipe_config))
6342 		intel_dp_set_m_n(pipe_config, M1_N1);
6343 
6344 	if (!transcoder_is_dsi(cpu_transcoder))
6345 		intel_set_pipe_timings(pipe_config);
6346 
6347 	intel_set_pipe_src_size(pipe_config);
6348 
6349 	if (cpu_transcoder != TRANSCODER_EDP &&
6350 	    !transcoder_is_dsi(cpu_transcoder)) {
6351 		I915_WRITE(PIPE_MULT(cpu_transcoder),
6352 			   pipe_config->pixel_multiplier - 1);
6353 	}
6354 
6355 	if (pipe_config->has_pch_encoder) {
6356 		intel_cpu_transcoder_set_m_n(pipe_config,
6357 					     &pipe_config->fdi_m_n, NULL);
6358 	}
6359 
6360 	if (!transcoder_is_dsi(cpu_transcoder))
6361 		haswell_set_pipeconf(pipe_config);
6362 
6363 	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6364 		bdw_set_pipemisc(pipe_config);
6365 
6366 	intel_crtc->active = true;
6367 
6368 	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6369 	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6370 			 pipe_config->pch_pfit.enabled;
6371 	if (psl_clkgate_wa)
6372 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6373 
6374 	if (INTEL_GEN(dev_priv) >= 9)
6375 		skylake_pfit_enable(pipe_config);
6376 	else
6377 		ironlake_pfit_enable(pipe_config);
6378 
6379 	/*
	 * On ILK+ the LUT must be loaded before the pipe is running but with
	 * clocks enabled.
6382 	 */
6383 	intel_color_load_luts(pipe_config);
6384 	intel_color_commit(pipe_config);
6385 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
6386 	if (INTEL_GEN(dev_priv) < 9)
6387 		intel_disable_primary_plane(pipe_config);
6388 
6389 	if (INTEL_GEN(dev_priv) >= 11)
6390 		icl_set_pipe_chicken(intel_crtc);
6391 
6392 	intel_ddi_set_pipe_settings(pipe_config);
6393 	if (!transcoder_is_dsi(cpu_transcoder))
6394 		intel_ddi_enable_transcoder_func(pipe_config);
6395 
6396 	if (dev_priv->display.initial_watermarks != NULL)
6397 		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
6398 
6399 	if (INTEL_GEN(dev_priv) >= 11)
6400 		icl_pipe_mbus_enable(intel_crtc);
6401 
6402 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
6403 	if (!transcoder_is_dsi(cpu_transcoder))
6404 		intel_enable_pipe(pipe_config);
6405 
6406 	if (pipe_config->has_pch_encoder)
6407 		lpt_pch_enable(old_intel_state, pipe_config);
6408 
6409 	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
6410 		intel_ddi_set_vc_payload_alloc(pipe_config, true);
6411 
6412 	assert_vblank_disabled(crtc);
6413 	intel_crtc_vblank_on(pipe_config);
6414 
6415 	intel_encoders_enable(crtc, pipe_config, old_state);
6416 
6417 	if (psl_clkgate_wa) {
6418 		intel_wait_for_vblank(dev_priv, pipe);
6419 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6420 	}
6421 
6422 	/* If we change the relative order between pipe/planes enabling, we need
6423 	 * to change the workaround. */
6424 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
6425 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
6426 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6427 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6428 	}
6429 }
6430 
6431 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6432 {
6433 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6434 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6435 	enum pipe pipe = crtc->pipe;
6436 
	/* To avoid upsetting the power well on Haswell, only disable the pfit
	 * if it's in use. The hw state code will make sure we get this right. */
6439 	if (old_crtc_state->pch_pfit.enabled) {
6440 		I915_WRITE(PF_CTL(pipe), 0);
6441 		I915_WRITE(PF_WIN_POS(pipe), 0);
6442 		I915_WRITE(PF_WIN_SZ(pipe), 0);
6443 	}
6444 }
6445 
6446 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6447 				  struct drm_atomic_state *old_state)
6448 {
6449 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
6450 	struct drm_device *dev = crtc->dev;
6451 	struct drm_i915_private *dev_priv = to_i915(dev);
6452 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6453 	int pipe = intel_crtc->pipe;
6454 
6455 	/*
6456 	 * Sometimes spurious CPU pipe underruns happen when the
6457 	 * pipe is already disabled, but FDI RX/TX is still enabled.
6458 	 * Happens at least with VGA+HDMI cloning. Suppress them.
6459 	 */
6460 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6461 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
6462 
6463 	intel_encoders_disable(crtc, old_crtc_state, old_state);
6464 
6465 	drm_crtc_vblank_off(crtc);
6466 	assert_vblank_disabled(crtc);
6467 
6468 	intel_disable_pipe(old_crtc_state);
6469 
6470 	ironlake_pfit_disable(old_crtc_state);
6471 
6472 	if (old_crtc_state->has_pch_encoder)
6473 		ironlake_fdi_disable(crtc);
6474 
6475 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6476 
6477 	if (old_crtc_state->has_pch_encoder) {
6478 		ironlake_disable_pch_transcoder(dev_priv, pipe);
6479 
6480 		if (HAS_PCH_CPT(dev_priv)) {
6481 			i915_reg_t reg;
6482 			u32 temp;
6483 
6484 			/* disable TRANS_DP_CTL */
6485 			reg = TRANS_DP_CTL(pipe);
6486 			temp = I915_READ(reg);
6487 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6488 				  TRANS_DP_PORT_SEL_MASK);
6489 			temp |= TRANS_DP_PORT_SEL_NONE;
6490 			I915_WRITE(reg, temp);
6491 
6492 			/* disable DPLL_SEL */
6493 			temp = I915_READ(PCH_DPLL_SEL);
6494 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
6495 			I915_WRITE(PCH_DPLL_SEL, temp);
6496 		}
6497 
6498 		ironlake_fdi_pll_disable(intel_crtc);
6499 	}
6500 
6501 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6502 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6503 }
6504 
6505 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6506 				 struct drm_atomic_state *old_state)
6507 {
6508 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
6509 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6510 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6511 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
6512 
6513 	intel_encoders_disable(crtc, old_crtc_state, old_state);
6514 
6515 	drm_crtc_vblank_off(crtc);
6516 	assert_vblank_disabled(crtc);
6517 
6518 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
6519 	if (!transcoder_is_dsi(cpu_transcoder))
6520 		intel_disable_pipe(old_crtc_state);
6521 
6522 	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6523 		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
6524 
6525 	if (!transcoder_is_dsi(cpu_transcoder))
6526 		intel_ddi_disable_transcoder_func(old_crtc_state);
6527 
6528 	intel_dsc_disable(old_crtc_state);
6529 
6530 	if (INTEL_GEN(dev_priv) >= 9)
6531 		skylake_scaler_disable(intel_crtc);
6532 	else
6533 		ironlake_pfit_disable(old_crtc_state);
6534 
6535 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6536 
6537 	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6538 }
6539 
6540 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
6541 {
6542 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6543 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6544 
6545 	if (!crtc_state->gmch_pfit.control)
6546 		return;
6547 
6548 	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to the register description and the PRM.
6551 	 */
6552 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6553 	assert_pipe_disabled(dev_priv, crtc->pipe);
6554 
6555 	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6556 	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
6557 
6558 	/* Border color in case we don't scale up to the full screen. Black by
6559 	 * default, change to something else for debugging. */
6560 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
6561 }
6562 
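/* Combo PHY ports, as opposed to the Type-C ports handled below. */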
6563 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6564 {
6565 	if (port == PORT_NONE)
6566 		return false;
6567 
6568 	if (IS_ELKHARTLAKE(dev_priv))
6569 		return port <= PORT_C;
6570 
6571 	if (INTEL_GEN(dev_priv) >= 11)
6572 		return port <= PORT_B;
6573 
6574 	return false;
6575 }
6576 
6577 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6578 {
6579 	if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6580 		return port >= PORT_C && port <= PORT_F;
6581 
6582 	return false;
6583 }
6584 
6585 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6586 {
6587 	if (!intel_port_is_tc(dev_priv, port))
6588 		return PORT_TC_NONE;
6589 
6590 	return port - PORT_C;
6591 }
6592 
6593 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
6594 {
6595 	switch (port) {
6596 	case PORT_A:
6597 		return POWER_DOMAIN_PORT_DDI_A_LANES;
6598 	case PORT_B:
6599 		return POWER_DOMAIN_PORT_DDI_B_LANES;
6600 	case PORT_C:
6601 		return POWER_DOMAIN_PORT_DDI_C_LANES;
6602 	case PORT_D:
6603 		return POWER_DOMAIN_PORT_DDI_D_LANES;
6604 	case PORT_E:
6605 		return POWER_DOMAIN_PORT_DDI_E_LANES;
6606 	case PORT_F:
6607 		return POWER_DOMAIN_PORT_DDI_F_LANES;
6608 	default:
6609 		MISSING_CASE(port);
6610 		return POWER_DOMAIN_PORT_OTHER;
6611 	}
6612 }
6613 
6614 enum intel_display_power_domain
6615 intel_aux_power_domain(struct intel_digital_port *dig_port)
6616 {
6617 	switch (dig_port->aux_ch) {
6618 	case AUX_CH_A:
6619 		return POWER_DOMAIN_AUX_A;
6620 	case AUX_CH_B:
6621 		return POWER_DOMAIN_AUX_B;
6622 	case AUX_CH_C:
6623 		return POWER_DOMAIN_AUX_C;
6624 	case AUX_CH_D:
6625 		return POWER_DOMAIN_AUX_D;
6626 	case AUX_CH_E:
6627 		return POWER_DOMAIN_AUX_E;
6628 	case AUX_CH_F:
6629 		return POWER_DOMAIN_AUX_F;
6630 	default:
6631 		MISSING_CASE(dig_port->aux_ch);
6632 		return POWER_DOMAIN_AUX_A;
6633 	}
6634 }
6635 
6636 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6637 				  struct intel_crtc_state *crtc_state)
6638 {
6639 	struct drm_device *dev = crtc->dev;
6640 	struct drm_i915_private *dev_priv = to_i915(dev);
6641 	struct drm_encoder *encoder;
6642 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6643 	enum pipe pipe = intel_crtc->pipe;
6644 	u64 mask;
6645 	enum transcoder transcoder = crtc_state->cpu_transcoder;
6646 
6647 	if (!crtc_state->base.active)
6648 		return 0;
6649 
6650 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6651 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
6652 	if (crtc_state->pch_pfit.enabled ||
6653 	    crtc_state->pch_pfit.force_thru)
6654 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6655 
6656 	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6657 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6658 
6659 		mask |= BIT_ULL(intel_encoder->power_domain);
6660 	}
6661 
6662 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
6663 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
6664 
6665 	if (crtc_state->shared_dpll)
6666 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
6667 
6668 	return mask;
6669 }
6670 
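/*
 * Grab references on the power domains the crtc newly needs and return
 * the set it no longer needs, so the caller can drop those only after
 * the modeset; domains that stay in use are never released in between.
 */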
6671 static u64
6672 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6673 			       struct intel_crtc_state *crtc_state)
6674 {
6675 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6676 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6677 	enum intel_display_power_domain domain;
6678 	u64 domains, new_domains, old_domains;
6679 
6680 	old_domains = intel_crtc->enabled_power_domains;
6681 	intel_crtc->enabled_power_domains = new_domains =
6682 		get_crtc_power_domains(crtc, crtc_state);
6683 
6684 	domains = new_domains & ~old_domains;
6685 
6686 	for_each_power_domain(domain, domains)
6687 		intel_display_power_get(dev_priv, domain);
6688 
6689 	return old_domains & ~new_domains;
6690 }
6691 
6692 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
6693 				      u64 domains)
6694 {
6695 	enum intel_display_power_domain domain;
6696 
6697 	for_each_power_domain(domain, domains)
6698 		intel_display_power_put_unchecked(dev_priv, domain);
6699 }
6700 
6701 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6702 				   struct drm_atomic_state *old_state)
6703 {
6704 	struct intel_atomic_state *old_intel_state =
6705 		to_intel_atomic_state(old_state);
6706 	struct drm_crtc *crtc = pipe_config->base.crtc;
6707 	struct drm_device *dev = crtc->dev;
6708 	struct drm_i915_private *dev_priv = to_i915(dev);
6709 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6710 	int pipe = intel_crtc->pipe;
6711 
6712 	if (WARN_ON(intel_crtc->active))
6713 		return;
6714 
6715 	if (intel_crtc_has_dp_encoder(pipe_config))
6716 		intel_dp_set_m_n(pipe_config, M1_N1);
6717 
6718 	intel_set_pipe_timings(pipe_config);
6719 	intel_set_pipe_src_size(pipe_config);
6720 
6721 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6722 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6723 		I915_WRITE(CHV_CANVAS(pipe), 0);
6724 	}
6725 
6726 	i9xx_set_pipeconf(pipe_config);
6727 
6728 	intel_crtc->active = true;
6729 
6730 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6731 
6732 	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6733 
6734 	if (IS_CHERRYVIEW(dev_priv)) {
6735 		chv_prepare_pll(intel_crtc, pipe_config);
6736 		chv_enable_pll(intel_crtc, pipe_config);
6737 	} else {
6738 		vlv_prepare_pll(intel_crtc, pipe_config);
6739 		vlv_enable_pll(intel_crtc, pipe_config);
6740 	}
6741 
6742 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
6743 
6744 	i9xx_pfit_enable(pipe_config);
6745 
6746 	intel_color_load_luts(pipe_config);
6747 	intel_color_commit(pipe_config);
6748 	/* update DSPCNTR to configure gamma for pipe bottom color */
6749 	intel_disable_primary_plane(pipe_config);
6750 
6751 	dev_priv->display.initial_watermarks(old_intel_state,
6752 					     pipe_config);
6753 	intel_enable_pipe(pipe_config);
6754 
6755 	assert_vblank_disabled(crtc);
6756 	intel_crtc_vblank_on(pipe_config);
6757 
6758 	intel_encoders_enable(crtc, pipe_config, old_state);
6759 }
6760 
6761 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
6762 {
6763 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6764 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6765 
6766 	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6767 	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
6768 }
6769 
6770 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6771 			     struct drm_atomic_state *old_state)
6772 {
6773 	struct intel_atomic_state *old_intel_state =
6774 		to_intel_atomic_state(old_state);
6775 	struct drm_crtc *crtc = pipe_config->base.crtc;
6776 	struct drm_device *dev = crtc->dev;
6777 	struct drm_i915_private *dev_priv = to_i915(dev);
6778 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6779 	enum pipe pipe = intel_crtc->pipe;
6780 
6781 	if (WARN_ON(intel_crtc->active))
6782 		return;
6783 
6784 	i9xx_set_pll_dividers(pipe_config);
6785 
6786 	if (intel_crtc_has_dp_encoder(pipe_config))
6787 		intel_dp_set_m_n(pipe_config, M1_N1);
6788 
6789 	intel_set_pipe_timings(pipe_config);
6790 	intel_set_pipe_src_size(pipe_config);
6791 
6792 	i9xx_set_pipeconf(pipe_config);
6793 
6794 	intel_crtc->active = true;
6795 
6796 	if (!IS_GEN(dev_priv, 2))
6797 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6798 
6799 	intel_encoders_pre_enable(crtc, pipe_config, old_state);
6800 
6801 	i9xx_enable_pll(intel_crtc, pipe_config);
6802 
6803 	i9xx_pfit_enable(pipe_config);
6804 
6805 	intel_color_load_luts(pipe_config);
6806 	intel_color_commit(pipe_config);
6807 	/* update DSPCNTR to configure gamma for pipe bottom color */
6808 	intel_disable_primary_plane(pipe_config);
6809 
6810 	if (dev_priv->display.initial_watermarks != NULL)
6811 		dev_priv->display.initial_watermarks(old_intel_state,
6812 						     pipe_config);
6813 	else
6814 		intel_update_watermarks(intel_crtc);
6815 	intel_enable_pipe(pipe_config);
6816 
6817 	assert_vblank_disabled(crtc);
6818 	intel_crtc_vblank_on(pipe_config);
6819 
6820 	intel_encoders_enable(crtc, pipe_config, old_state);
6821 }
6822 
6823 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
6824 {
6825 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6826 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6827 
6828 	if (!old_crtc_state->gmch_pfit.control)
6829 		return;
6830 
6831 	assert_pipe_disabled(dev_priv, crtc->pipe);
6832 
6833 	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6834 		      I915_READ(PFIT_CONTROL));
6835 	I915_WRITE(PFIT_CONTROL, 0);
6836 }
6837 
6838 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6839 			      struct drm_atomic_state *old_state)
6840 {
6841 	struct drm_crtc *crtc = old_crtc_state->base.crtc;
6842 	struct drm_device *dev = crtc->dev;
6843 	struct drm_i915_private *dev_priv = to_i915(dev);
6844 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6845 	int pipe = intel_crtc->pipe;
6846 
6847 	/*
6848 	 * On gen2 planes are double buffered but the pipe isn't, so we must
6849 	 * wait for planes to fully turn off before disabling the pipe.
6850 	 */
6851 	if (IS_GEN(dev_priv, 2))
6852 		intel_wait_for_vblank(dev_priv, pipe);
6853 
6854 	intel_encoders_disable(crtc, old_crtc_state, old_state);
6855 
6856 	drm_crtc_vblank_off(crtc);
6857 	assert_vblank_disabled(crtc);
6858 
6859 	intel_disable_pipe(old_crtc_state);
6860 
6861 	i9xx_pfit_disable(old_crtc_state);
6862 
6863 	intel_encoders_post_disable(crtc, old_crtc_state, old_state);
6864 
6865 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
6866 		if (IS_CHERRYVIEW(dev_priv))
6867 			chv_disable_pll(dev_priv, pipe);
6868 		else if (IS_VALLEYVIEW(dev_priv))
6869 			vlv_disable_pll(dev_priv, pipe);
6870 		else
6871 			i9xx_disable_pll(old_crtc_state);
6872 	}
6873 
6874 	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
6875 
6876 	if (!IS_GEN(dev_priv, 2))
6877 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6878 
6879 	if (!dev_priv->display.initial_watermarks)
6880 		intel_update_watermarks(intel_crtc);
6881 
	/* clock the pipe down to 640x480@60Hz to potentially save power */
6883 	if (IS_I830(dev_priv))
6884 		i830_enable_pipe(dev_priv, pipe);
6885 }
6886 
6887 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6888 					struct drm_modeset_acquire_ctx *ctx)
6889 {
6890 	struct intel_encoder *encoder;
6891 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6892 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6893 	struct intel_bw_state *bw_state =
6894 		to_intel_bw_state(dev_priv->bw_obj.state);
6895 	enum intel_display_power_domain domain;
6896 	struct intel_plane *plane;
6897 	u64 domains;
6898 	struct drm_atomic_state *state;
6899 	struct intel_crtc_state *crtc_state;
6900 	int ret;
6901 
6902 	if (!intel_crtc->active)
6903 		return;
6904 
6905 	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6906 		const struct intel_plane_state *plane_state =
6907 			to_intel_plane_state(plane->base.state);
6908 
6909 		if (plane_state->base.visible)
6910 			intel_plane_disable_noatomic(intel_crtc, plane);
6911 	}
6912 
6913 	state = drm_atomic_state_alloc(crtc->dev);
6914 	if (!state) {
6915 		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6916 			      crtc->base.id, crtc->name);
6917 		return;
6918 	}
6919 
6920 	state->acquire_ctx = ctx;
6921 
6922 	/* Everything's already locked, -EDEADLK can't happen. */
6923 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6924 	ret = drm_atomic_add_affected_connectors(state, crtc);
6925 
6926 	WARN_ON(IS_ERR(crtc_state) || ret);
6927 
6928 	dev_priv->display.crtc_disable(crtc_state, state);
6929 
6930 	drm_atomic_state_put(state);
6931 
6932 	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6933 		      crtc->base.id, crtc->name);
6934 
6935 	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6936 	crtc->state->active = false;
6937 	intel_crtc->active = false;
6938 	crtc->enabled = false;
6939 	crtc->state->connector_mask = 0;
6940 	crtc->state->encoder_mask = 0;
6941 
6942 	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6943 		encoder->base.crtc = NULL;
6944 
6945 	intel_fbc_disable(intel_crtc);
6946 	intel_update_watermarks(intel_crtc);
6947 	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
6948 
6949 	domains = intel_crtc->enabled_power_domains;
6950 	for_each_power_domain(domain, domains)
6951 		intel_display_power_put_unchecked(dev_priv, domain);
6952 	intel_crtc->enabled_power_domains = 0;
6953 
6954 	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6955 	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
6956 	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
6957 
6958 	bw_state->data_rate[intel_crtc->pipe] = 0;
6959 	bw_state->num_active_planes[intel_crtc->pipe] = 0;
6960 }
6961 
6962 /*
6963  * turn all crtc's off, but do not adjust state
6964  * This has to be paired with a call to intel_modeset_setup_hw_state.
6965  */
6966 int intel_display_suspend(struct drm_device *dev)
6967 {
6968 	struct drm_i915_private *dev_priv = to_i915(dev);
6969 	struct drm_atomic_state *state;
6970 	int ret;
6971 
6972 	state = drm_atomic_helper_suspend(dev);
6973 	ret = PTR_ERR_OR_ZERO(state);
6974 	if (ret)
6975 		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6976 	else
6977 		dev_priv->modeset_restore_state = state;
6978 	return ret;
6979 }
6980 
6981 void intel_encoder_destroy(struct drm_encoder *encoder)
6982 {
6983 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6984 
6985 	drm_encoder_cleanup(encoder);
6986 	kfree(intel_encoder);
6987 }
6988 
/* Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency). */
6991 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6992 					 struct drm_connector_state *conn_state)
6993 {
6994 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
6995 
6996 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6997 		      connector->base.base.id,
6998 		      connector->base.name);
6999 
7000 	if (connector->get_hw_state(connector)) {
7001 		struct intel_encoder *encoder = connector->encoder;
7002 
7003 		I915_STATE_WARN(!crtc_state,
7004 			 "connector enabled without attached crtc\n");
7005 
7006 		if (!crtc_state)
7007 			return;
7008 
7009 		I915_STATE_WARN(!crtc_state->active,
7010 		      "connector is active, but attached crtc isn't\n");
7011 
7012 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7013 			return;
7014 
7015 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7016 			"atomic encoder doesn't match attached encoder\n");
7017 
7018 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7019 			"attached encoder crtc differs from connector crtc\n");
7020 	} else {
7021 		I915_STATE_WARN(crtc_state && crtc_state->active,
7022 			"attached crtc is active, but connector isn't\n");
7023 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7024 			"best encoder set without crtc!\n");
7025 	}
7026 }
7027 
7028 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7029 {
7030 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7031 		return crtc_state->fdi_lanes;
7032 
7033 	return 0;
7034 }
7035 
7036 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
7037 				     struct intel_crtc_state *pipe_config)
7038 {
7039 	struct drm_i915_private *dev_priv = to_i915(dev);
7040 	struct drm_atomic_state *state = pipe_config->base.state;
7041 	struct intel_crtc *other_crtc;
7042 	struct intel_crtc_state *other_crtc_state;
7043 
7044 	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7045 		      pipe_name(pipe), pipe_config->fdi_lanes);
7046 	if (pipe_config->fdi_lanes > 4) {
7047 		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7048 			      pipe_name(pipe), pipe_config->fdi_lanes);
7049 		return -EINVAL;
7050 	}
7051 
7052 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7053 		if (pipe_config->fdi_lanes > 2) {
7054 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7055 				      pipe_config->fdi_lanes);
7056 			return -EINVAL;
7057 		} else {
7058 			return 0;
7059 		}
7060 	}
7061 
7062 	if (INTEL_INFO(dev_priv)->num_pipes == 2)
7063 		return 0;
7064 
	/* Ivybridge's 3-pipe FDI lane sharing is really complicated */
7066 	switch (pipe) {
7067 	case PIPE_A:
7068 		return 0;
7069 	case PIPE_B:
7070 		if (pipe_config->fdi_lanes <= 2)
7071 			return 0;
7072 
7073 		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
7074 		other_crtc_state =
7075 			intel_atomic_get_crtc_state(state, other_crtc);
7076 		if (IS_ERR(other_crtc_state))
7077 			return PTR_ERR(other_crtc_state);
7078 
7079 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
7080 			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7081 				      pipe_name(pipe), pipe_config->fdi_lanes);
7082 			return -EINVAL;
7083 		}
7084 		return 0;
7085 	case PIPE_C:
7086 		if (pipe_config->fdi_lanes > 2) {
7087 			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7088 				      pipe_name(pipe), pipe_config->fdi_lanes);
7089 			return -EINVAL;
7090 		}
7091 
7092 		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
7093 		other_crtc_state =
7094 			intel_atomic_get_crtc_state(state, other_crtc);
7095 		if (IS_ERR(other_crtc_state))
7096 			return PTR_ERR(other_crtc_state);
7097 
7098 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
7099 			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
7100 			return -EINVAL;
7101 		}
7102 		return 0;
7103 	default:
7104 		BUG();
7105 	}
7106 }
7107 
7108 #define RETRY 1
7109 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
7110 				       struct intel_crtc_state *pipe_config)
7111 {
7112 	struct drm_device *dev = intel_crtc->base.dev;
7113 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7114 	int lane, link_bw, fdi_dotclock, ret;
7115 	bool needs_recompute = false;
7116 
7117 retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1kHz.
	 * Hence the bandwidth of each lane in terms of the mode
	 * signal is:
	 */
7125 	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
7126 
7127 	fdi_dotclock = adjusted_mode->crtc_clock;
7128 
7129 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
7130 					   pipe_config->pipe_bpp);
7131 
7132 	pipe_config->fdi_lanes = lane;
7133 
7134 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
7135 			       link_bw, &pipe_config->fdi_m_n, false);
7136 
7137 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
7138 	if (ret == -EDEADLK)
7139 		return ret;
7140 
7141 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
7142 		pipe_config->pipe_bpp -= 2*3;
7143 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7144 			      pipe_config->pipe_bpp);
7145 		needs_recompute = true;
7146 		pipe_config->bw_constrained = true;
7147 
7148 		goto retry;
7149 	}
7150 
7151 	if (needs_recompute)
7152 		return RETRY;
7153 
7154 	return ret;
7155 }
7156 
7157 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7158 {
7159 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7160 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7161 
7162 	/* IPS only exists on ULT machines and is tied to pipe A. */
7163 	if (!hsw_crtc_supports_ips(crtc))
7164 		return false;
7165 
7166 	if (!i915_modparams.enable_ips)
7167 		return false;
7168 
7169 	if (crtc_state->pipe_bpp > 24)
7170 		return false;
7171 
7172 	/*
7173 	 * We compare against max which means we must take
7174 	 * the increased cdclk requirement into account when
7175 	 * calculating the new cdclk.
7176 	 *
7177 	 * Should measure whether using a lower cdclk w/o IPS
7178 	 */
7179 	if (IS_BROADWELL(dev_priv) &&
7180 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7181 		return false;
7182 
7183 	return true;
7184 }
7185 
7186 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7187 {
7188 	struct drm_i915_private *dev_priv =
7189 		to_i915(crtc_state->base.crtc->dev);
7190 	struct intel_atomic_state *intel_state =
7191 		to_intel_atomic_state(crtc_state->base.state);
7192 
7193 	if (!hsw_crtc_state_ips_capable(crtc_state))
7194 		return false;
7195 
7196 	/*
7197 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7198 	 * enabled and disabled dynamically based on package C states,
7199 	 * user space can't make reliable use of the CRCs, so let's just
7200 	 * completely disable it.
7201 	 */
7202 	if (crtc_state->crc_enabled)
7203 		return false;
7204 
7205 	/* IPS should be fine as long as at least one plane is enabled. */
7206 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7207 		return false;
7208 
7209 	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7210 	if (IS_BROADWELL(dev_priv) &&
7211 	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7212 		return false;
7213 
7214 	return true;
7215 }
7216 
7217 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7218 {
7219 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7220 
7221 	/* GDG double wide on either pipe, otherwise pipe A only */
7222 	return INTEL_GEN(dev_priv) < 4 &&
7223 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7224 }
7225 
7226 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7227 {
7228 	u32 pixel_rate;
7229 
7230 	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7231 
7232 	/*
7233 	 * We only use IF-ID interlacing. If we ever use
7234 	 * PF-ID we'll need to adjust the pixel_rate here.
7235 	 */
7236 
7237 	if (pipe_config->pch_pfit.enabled) {
7238 		u64 pipe_w, pipe_h, pfit_w, pfit_h;
7239 		u32 pfit_size = pipe_config->pch_pfit.size;
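
		/*
		 * The comparisons below clamp pipe_w/h to the larger of the
		 * source and pfit destination sizes, so only downscaling
		 * raises the effective rate. E.g. (illustrative) 1920x1080
		 * at 148500 kHz downscaled to a 1280x720 pfit window gives
		 * 148500 * (1920 * 1080) / (1280 * 720) = 334125 kHz.
		 */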
7240 
7241 		pipe_w = pipe_config->pipe_src_w;
7242 		pipe_h = pipe_config->pipe_src_h;
7243 
7244 		pfit_w = (pfit_size >> 16) & 0xFFFF;
7245 		pfit_h = pfit_size & 0xFFFF;
7246 		if (pipe_w < pfit_w)
7247 			pipe_w = pfit_w;
7248 		if (pipe_h < pfit_h)
7249 			pipe_h = pfit_h;
7250 
7251 		if (WARN_ON(!pfit_w || !pfit_h))
7252 			return pixel_rate;
7253 
7254 		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7255 				     pfit_w * pfit_h);
7256 	}
7257 
7258 	return pixel_rate;
7259 }
7260 
7261 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7262 {
7263 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7264 
7265 	if (HAS_GMCH(dev_priv))
7266 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
7267 		crtc_state->pixel_rate =
7268 			crtc_state->base.adjusted_mode.crtc_clock;
7269 	else
7270 		crtc_state->pixel_rate =
7271 			ilk_pipe_pixel_rate(crtc_state);
7272 }
7273 
7274 static int intel_crtc_compute_config(struct intel_crtc *crtc,
7275 				     struct intel_crtc_state *pipe_config)
7276 {
7277 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7278 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7279 	int clock_limit = dev_priv->max_dotclk_freq;
7280 
7281 	if (INTEL_GEN(dev_priv) < 4) {
7282 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
7283 
7284 		/*
7285 		 * Enable double wide mode when the dot clock
7286 		 * is > 90% of the (display) core speed.
7287 		 */
7288 		if (intel_crtc_supports_double_wide(crtc) &&
7289 		    adjusted_mode->crtc_clock > clock_limit) {
7290 			clock_limit = dev_priv->max_dotclk_freq;
7291 			pipe_config->double_wide = true;
7292 		}
7293 	}
7294 
7295 	if (adjusted_mode->crtc_clock > clock_limit) {
7296 		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7297 			      adjusted_mode->crtc_clock, clock_limit,
7298 			      yesno(pipe_config->double_wide));
7299 		return -EINVAL;
7300 	}
7301 
7302 	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7303 	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7304 	     pipe_config->base.ctm) {
7305 		/*
7306 		 * There is only one pipe CSC unit per pipe, and we need that
7307 		 * for output conversion from RGB->YCBCR. So if CTM is already
7308 		 * applied we can't support YCBCR420 output.
7309 		 */
7310 		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7311 		return -EINVAL;
7312 	}
7313 
7314 	/*
7315 	 * Pipe horizontal size must be even in:
7316 	 * - DVO ganged mode
7317 	 * - LVDS dual channel mode
7318 	 * - Double wide pipe
7319 	 */
7320 	if (pipe_config->pipe_src_w & 1) {
7321 		if (pipe_config->double_wide) {
7322 			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7323 			return -EINVAL;
7324 		}
7325 
7326 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
7327 		    intel_is_dual_link_lvds(dev_priv)) {
7328 			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7329 			return -EINVAL;
7330 		}
7331 	}
7332 
7333 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
7334 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7335 	 */
7336 	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7337 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7338 		return -EINVAL;
7339 
7340 	intel_crtc_compute_pixel_rate(pipe_config);
7341 
7342 	if (pipe_config->has_pch_encoder)
7343 		return ironlake_fdi_compute_config(crtc, pipe_config);
7344 
7345 	return 0;
7346 }
7347 
7348 static void
7349 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7350 {
7351 	while (*num > DATA_LINK_M_N_MASK ||
7352 	       *den > DATA_LINK_M_N_MASK) {
7353 		*num >>= 1;
7354 		*den >>= 1;
7355 	}
7356 }
7357 
7358 static void compute_m_n(unsigned int m, unsigned int n,
7359 			u32 *ret_m, u32 *ret_n,
7360 			bool constant_n)
7361 {
7362 	/*
7363 	 * Several DP dongles in particular seem to be fussy about
7364 	 * too large link M/N values. Give N value as 0x8000 that
7365 	 * should be acceptable by specific devices. 0x8000 is the
7366 	 * specified fixed N value for asynchronous clock mode,
7367 	 * which the devices expect also in synchronous clock mode.
7368 	 */
7369 	if (constant_n)
7370 		*ret_n = 0x8000;
7371 	else
7372 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7373 
7374 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7375 	intel_reduce_m_n_ratio(ret_m, ret_n);
7376 }
7377 
7378 void
7379 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7380 		       int pixel_clock, int link_clock,
7381 		       struct intel_link_m_n *m_n,
7382 		       bool constant_n)
7383 {
7384 	m_n->tu = 64;
7385 
7386 	compute_m_n(bits_per_pixel * pixel_clock,
7387 		    link_clock * nlanes * 8,
7388 		    &m_n->gmch_m, &m_n->gmch_n,
7389 		    constant_n);
7390 
7391 	compute_m_n(pixel_clock, link_clock,
7392 		    &m_n->link_m, &m_n->link_n,
7393 		    constant_n);
7394 }
7395 
7396 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7397 {
7398 	if (i915_modparams.panel_use_ssc >= 0)
7399 		return i915_modparams.panel_use_ssc != 0;
7400 	return dev_priv->vbt.lvds_use_ssc
7401 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7402 }
7403 
7404 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7405 {
7406 	return (1 << dpll->n) << 16 | dpll->m2;
7407 }
7408 
7409 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7410 {
7411 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7412 }
7413 
7414 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7415 				     struct intel_crtc_state *crtc_state,
7416 				     struct dpll *reduced_clock)
7417 {
7418 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7419 	u32 fp, fp2 = 0;
7420 
7421 	if (IS_PINEVIEW(dev_priv)) {
7422 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7423 		if (reduced_clock)
7424 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7425 	} else {
7426 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7427 		if (reduced_clock)
7428 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7429 	}
7430 
7431 	crtc_state->dpll_hw_state.fp0 = fp;
7432 
7433 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7434 	    reduced_clock) {
7435 		crtc_state->dpll_hw_state.fp1 = fp2;
7436 	} else {
7437 		crtc_state->dpll_hw_state.fp1 = fp;
7438 	}
7439 }
7440 
7441 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7442 		pipe)
7443 {
7444 	u32 reg_val;
7445 
7446 	/*
7447 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7448 	 * and set it to a reasonable value instead.
7449 	 */
7450 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7451 	reg_val &= 0xffffff00;
7452 	reg_val |= 0x00000030;
7453 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7454 
7455 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7456 	reg_val &= 0x00ffffff;
7457 	reg_val |= 0x8c000000;
7458 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7459 
7460 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7461 	reg_val &= 0xffffff00;
7462 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7463 
7464 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7465 	reg_val &= 0x00ffffff;
7466 	reg_val |= 0xb0000000;
7467 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7468 }
7469 
7470 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7471 					 const struct intel_link_m_n *m_n)
7472 {
7473 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7474 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7475 	enum pipe pipe = crtc->pipe;
7476 
7477 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7478 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7479 	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7480 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7481 }
7482 
7483 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7484 				 enum transcoder transcoder)
7485 {
7486 	if (IS_HASWELL(dev_priv))
7487 		return transcoder == TRANSCODER_EDP;
7488 
7489 	/*
7490 	 * Strictly speaking some registers are available before
7491 	 * gen7, but we only support DRRS on gen7+
7492 	 */
7493 	return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7494 }
7495 
7496 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7497 					 const struct intel_link_m_n *m_n,
7498 					 const struct intel_link_m_n *m2_n2)
7499 {
7500 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7501 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7502 	enum pipe pipe = crtc->pipe;
7503 	enum transcoder transcoder = crtc_state->cpu_transcoder;
7504 
7505 	if (INTEL_GEN(dev_priv) >= 5) {
7506 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7507 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7508 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7509 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7510 		/*
7511 		 *  M2_N2 registers are set only if DRRS is supported
7512 		 * (to make sure the registers are not unnecessarily accessed).
7513 		 */
7514 		if (m2_n2 && crtc_state->has_drrs &&
7515 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
7516 			I915_WRITE(PIPE_DATA_M2(transcoder),
7517 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7518 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7519 			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7520 			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7521 		}
7522 	} else {
7523 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7524 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7525 		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7526 		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7527 	}
7528 }
7529 
7530 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
7531 {
7532 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7533 
7534 	if (m_n == M1_N1) {
7535 		dp_m_n = &crtc_state->dp_m_n;
7536 		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {
		/*
		 * M2_N2 registers are not supported, hence the m2_n2 divider
		 * value needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
7544 	} else {
7545 		DRM_ERROR("Unsupported divider value\n");
7546 		return;
7547 	}
7548 
7549 	if (crtc_state->has_pch_encoder)
7550 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
7551 	else
7552 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
7553 }
7554 
7555 static void vlv_compute_dpll(struct intel_crtc *crtc,
7556 			     struct intel_crtc_state *pipe_config)
7557 {
7558 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7559 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7560 	if (crtc->pipe != PIPE_A)
7561 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7562 
7563 	/* DPLL not used with DSI, but still need the rest set up */
7564 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7565 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7566 			DPLL_EXT_BUFFER_ENABLE_VLV;
7567 
7568 	pipe_config->dpll_hw_state.dpll_md =
7569 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7570 }
7571 
7572 static void chv_compute_dpll(struct intel_crtc *crtc,
7573 			     struct intel_crtc_state *pipe_config)
7574 {
7575 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7576 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7577 	if (crtc->pipe != PIPE_A)
7578 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7579 
7580 	/* DPLL not used with DSI, but still need the rest set up */
7581 	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7582 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7583 
7584 	pipe_config->dpll_hw_state.dpll_md =
7585 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7586 }
7587 
7588 static void vlv_prepare_pll(struct intel_crtc *crtc,
7589 			    const struct intel_crtc_state *pipe_config)
7590 {
7591 	struct drm_device *dev = crtc->base.dev;
7592 	struct drm_i915_private *dev_priv = to_i915(dev);
7593 	enum pipe pipe = crtc->pipe;
7594 	u32 mdiv;
7595 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7596 	u32 coreclk, reg_val;
7597 
7598 	/* Enable Refclk */
7599 	I915_WRITE(DPLL(pipe),
7600 		   pipe_config->dpll_hw_state.dpll &
7601 		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7602 
7603 	/* No need to actually set up the DPLL with DSI */
7604 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7605 		return;
7606 
7607 	vlv_dpio_get(dev_priv);
7608 
7609 	bestn = pipe_config->dpll.n;
7610 	bestm1 = pipe_config->dpll.m1;
7611 	bestm2 = pipe_config->dpll.m2;
7612 	bestp1 = pipe_config->dpll.p1;
7613 	bestp2 = pipe_config->dpll.p2;
7614 
7615 	/* See eDP HDMI DPIO driver vbios notes doc */
7616 
7617 	/* PLL B needs special handling */
7618 	if (pipe == PIPE_B)
7619 		vlv_pllb_recal_opamp(dev_priv, pipe);
7620 
7621 	/* Set up Tx target for periodic Rcomp update */
7622 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7623 
7624 	/* Disable target IRef on PLL */
7625 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7626 	reg_val &= 0x00ffffff;
7627 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7628 
7629 	/* Disable fast lock */
7630 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7631 
7632 	/* Set idtafcrecal before PLL is enabled */
7633 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7634 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7635 	mdiv |= ((bestn << DPIO_N_SHIFT));
7636 	mdiv |= (1 << DPIO_K_SHIFT);
7637 
7638 	/*
7639 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7640 	 * but we don't support that).
7641 	 * Note: don't use the DAC post divider as it seems unstable.
7642 	 */
7643 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7644 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7645 
7646 	mdiv |= DPIO_ENABLE_CALIBRATION;
7647 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7648 
7649 	/* Set HBR and RBR LPF coefficients */
7650 	if (pipe_config->port_clock == 162000 ||
7651 	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7652 	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
7653 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7654 				 0x009f0003);
7655 	else
7656 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7657 				 0x00d0000f);
7658 
7659 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7660 		/* Use SSC source */
7661 		if (pipe == PIPE_A)
7662 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7663 					 0x0df40000);
7664 		else
7665 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7666 					 0x0df70000);
7667 	} else { /* HDMI or VGA */
7668 		/* Use bend source */
7669 		if (pipe == PIPE_A)
7670 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7671 					 0x0df70000);
7672 		else
7673 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7674 					 0x0df40000);
7675 	}
7676 
7677 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7678 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7679 	if (intel_crtc_has_dp_encoder(pipe_config))
7680 		coreclk |= 0x01000000;
7681 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7682 
7683 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7684 
7685 	vlv_dpio_put(dev_priv);
7686 }
7687 
7688 static void chv_prepare_pll(struct intel_crtc *crtc,
7689 			    const struct intel_crtc_state *pipe_config)
7690 {
7691 	struct drm_device *dev = crtc->base.dev;
7692 	struct drm_i915_private *dev_priv = to_i915(dev);
7693 	enum pipe pipe = crtc->pipe;
7694 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7695 	u32 loopfilter, tribuf_calcntr;
7696 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7697 	u32 dpio_val;
7698 	int vco;
7699 
7700 	/* Enable Refclk and SSC */
7701 	I915_WRITE(DPLL(pipe),
7702 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7703 
7704 	/* No need to actually set up the DPLL with DSI */
7705 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7706 		return;
7707 
7708 	bestn = pipe_config->dpll.n;
7709 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7710 	bestm1 = pipe_config->dpll.m1;
7711 	bestm2 = pipe_config->dpll.m2 >> 22;
7712 	bestp1 = pipe_config->dpll.p1;
7713 	bestp2 = pipe_config->dpll.p2;
7714 	vco = pipe_config->dpll.vco;
7715 	dpio_val = 0;
7716 	loopfilter = 0;
7717 
7718 	vlv_dpio_get(dev_priv);
7719 
7720 	/* p1 and p2 divider */
7721 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7722 			5 << DPIO_CHV_S1_DIV_SHIFT |
7723 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7724 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7725 			1 << DPIO_CHV_K_DIV_SHIFT);
7726 
7727 	/* Feedback post-divider - m2 */
7728 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7729 
7730 	/* Feedback refclk divider - n and m1 */
7731 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7732 			DPIO_CHV_M1_DIV_BY_2 |
7733 			1 << DPIO_CHV_N_DIV_SHIFT);
7734 
7735 	/* M2 fraction division */
7736 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7737 
7738 	/* M2 fraction division enable */
7739 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7740 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7741 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7742 	if (bestm2_frac)
7743 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7744 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7745 
7746 	/* Program digital lock detect threshold */
7747 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7748 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7749 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7750 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7751 	if (!bestm2_frac)
7752 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7753 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7754 
7755 	/* Loop filter */
7756 	if (vco == 5400000) {
7757 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7758 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7759 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7760 		tribuf_calcntr = 0x9;
7761 	} else if (vco <= 6200000) {
7762 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7763 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7764 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7765 		tribuf_calcntr = 0x9;
7766 	} else if (vco <= 6480000) {
7767 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7768 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7769 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7770 		tribuf_calcntr = 0x8;
7771 	} else {
7772 		/* Not supported. Apply the same limits as in the max case */
7773 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7774 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7775 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7776 		tribuf_calcntr = 0;
7777 	}
7778 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7779 
7780 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7781 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7782 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7783 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7784 
7785 	/* AFC Recal */
7786 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7787 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7788 			DPIO_AFC_RECAL);
7789 
7790 	vlv_dpio_put(dev_priv);
7791 }
7792 
7793 /**
7794  * vlv_force_pll_on - forcibly enable just the PLL
7795  * @dev_priv: i915 private structure
7796  * @pipe: pipe PLL to enable
7797  * @dpll: PLL configuration
7798  *
7799  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7800  * in cases where we need the PLL enabled even when @pipe is not going to
7801  * be enabled.
7802  */
7803 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7804 		     const struct dpll *dpll)
7805 {
7806 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7807 	struct intel_crtc_state *pipe_config;
7808 
7809 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7810 	if (!pipe_config)
7811 		return -ENOMEM;
7812 
7813 	pipe_config->base.crtc = &crtc->base;
7814 	pipe_config->pixel_multiplier = 1;
7815 	pipe_config->dpll = *dpll;
7816 
7817 	if (IS_CHERRYVIEW(dev_priv)) {
7818 		chv_compute_dpll(crtc, pipe_config);
7819 		chv_prepare_pll(crtc, pipe_config);
7820 		chv_enable_pll(crtc, pipe_config);
7821 	} else {
7822 		vlv_compute_dpll(crtc, pipe_config);
7823 		vlv_prepare_pll(crtc, pipe_config);
7824 		vlv_enable_pll(crtc, pipe_config);
7825 	}
7826 
7827 	kfree(pipe_config);
7828 
7829 	return 0;
7830 }
7831 
7832 /**
7833  * vlv_force_pll_off - forcibly disable just the PLL
7834  * @dev_priv: i915 private structure
7835  * @pipe: pipe PLL to disable
7836  *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * previously force-enabled with vlv_force_pll_on() while @pipe itself
 * was not enabled.
7839  */
7840 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7841 {
7842 	if (IS_CHERRYVIEW(dev_priv))
7843 		chv_disable_pll(dev_priv, pipe);
7844 	else
7845 		vlv_disable_pll(dev_priv, pipe);
7846 }
7847 
7848 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7849 			      struct intel_crtc_state *crtc_state,
7850 			      struct dpll *reduced_clock)
7851 {
7852 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7853 	u32 dpll;
7854 	struct dpll *clock = &crtc_state->dpll;
7855 
7856 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7857 
7858 	dpll = DPLL_VGA_MODE_DIS;
7859 
7860 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7861 		dpll |= DPLLB_MODE_LVDS;
7862 	else
7863 		dpll |= DPLLB_MODE_DAC_SERIAL;
7864 
7865 	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7866 	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7867 		dpll |= (crtc_state->pixel_multiplier - 1)
7868 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7869 	}
7870 
7871 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7872 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7873 		dpll |= DPLL_SDVO_HIGH_SPEED;
7874 
7875 	if (intel_crtc_has_dp_encoder(crtc_state))
7876 		dpll |= DPLL_SDVO_HIGH_SPEED;
7877 
7878 	/* compute bitmask from p1 value */
7879 	if (IS_PINEVIEW(dev_priv))
7880 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7881 	else {
7882 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7883 		if (IS_G4X(dev_priv) && reduced_clock)
7884 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7885 	}
7886 	switch (clock->p2) {
7887 	case 5:
7888 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7889 		break;
7890 	case 7:
7891 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7892 		break;
7893 	case 10:
7894 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7895 		break;
7896 	case 14:
7897 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7898 		break;
7899 	}
7900 	if (INTEL_GEN(dev_priv) >= 4)
7901 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7902 
7903 	if (crtc_state->sdvo_tv_clock)
7904 		dpll |= PLL_REF_INPUT_TVCLKINBC;
7905 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7906 		 intel_panel_use_ssc(dev_priv))
7907 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7908 	else
7909 		dpll |= PLL_REF_INPUT_DREFCLK;
7910 
7911 	dpll |= DPLL_VCO_ENABLE;
7912 	crtc_state->dpll_hw_state.dpll = dpll;
7913 
7914 	if (INTEL_GEN(dev_priv) >= 4) {
7915 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7916 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7917 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7918 	}
7919 }
7920 
7921 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7922 			      struct intel_crtc_state *crtc_state,
7923 			      struct dpll *reduced_clock)
7924 {
7925 	struct drm_device *dev = crtc->base.dev;
7926 	struct drm_i915_private *dev_priv = to_i915(dev);
7927 	u32 dpll;
7928 	struct dpll *clock = &crtc_state->dpll;
7929 
7930 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7931 
7932 	dpll = DPLL_VGA_MODE_DIS;
7933 
7934 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7935 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7936 	} else {
7937 		if (clock->p1 == 2)
7938 			dpll |= PLL_P1_DIVIDE_BY_TWO;
7939 		else
7940 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7941 		if (clock->p2 == 4)
7942 			dpll |= PLL_P2_DIVIDE_BY_4;
7943 	}
7944 
7945 	/*
7946 	 * Bspec:
7947 	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
7948 	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7949 	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7950 	 *  Enable) must be set to “1” in both the DPLL A Control Register
7951 	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7952 	 *
7953 	 * For simplicity We simply keep both bits always enabled in
7954 	 * both DPLLS. The spec says we should disable the DVO 2X clock
7955 	 * when not needed, but this seems to work fine in practice.
7956 	 */
7957 	if (IS_I830(dev_priv) ||
7958 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7959 		dpll |= DPLL_DVO_2X_MODE;
7960 
7961 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7962 	    intel_panel_use_ssc(dev_priv))
7963 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7964 	else
7965 		dpll |= PLL_REF_INPUT_DREFCLK;
7966 
7967 	dpll |= DPLL_VCO_ENABLE;
7968 	crtc_state->dpll_hw_state.dpll = dpll;
7969 }
7970 
7971 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
7972 {
7973 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7974 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7975 	enum pipe pipe = crtc->pipe;
7976 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7977 	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
7978 	u32 crtc_vtotal, crtc_vblank_end;
7979 	int vsyncshift = 0;
7980 
	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
7983 	crtc_vtotal = adjusted_mode->crtc_vtotal;
7984 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7985 
7986 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7987 		/* the chip adds 2 halflines automatically */
7988 		crtc_vtotal -= 1;
7989 		crtc_vblank_end -= 1;
7990 
7991 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7992 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7993 		else
7994 			vsyncshift = adjusted_mode->crtc_hsync_start -
7995 				adjusted_mode->crtc_htotal / 2;
7996 		if (vsyncshift < 0)
7997 			vsyncshift += adjusted_mode->crtc_htotal;
7998 	}
7999 
8000 	if (INTEL_GEN(dev_priv) > 3)
8001 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
8002 
8003 	I915_WRITE(HTOTAL(cpu_transcoder),
8004 		   (adjusted_mode->crtc_hdisplay - 1) |
8005 		   ((adjusted_mode->crtc_htotal - 1) << 16));
8006 	I915_WRITE(HBLANK(cpu_transcoder),
8007 		   (adjusted_mode->crtc_hblank_start - 1) |
8008 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
8009 	I915_WRITE(HSYNC(cpu_transcoder),
8010 		   (adjusted_mode->crtc_hsync_start - 1) |
8011 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
8012 
8013 	I915_WRITE(VTOTAL(cpu_transcoder),
8014 		   (adjusted_mode->crtc_vdisplay - 1) |
8015 		   ((crtc_vtotal - 1) << 16));
8016 	I915_WRITE(VBLANK(cpu_transcoder),
8017 		   (adjusted_mode->crtc_vblank_start - 1) |
8018 		   ((crtc_vblank_end - 1) << 16));
8019 	I915_WRITE(VSYNC(cpu_transcoder),
8020 		   (adjusted_mode->crtc_vsync_start - 1) |
8021 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
8022 
8023 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8024 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8025 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
8026 	 * bits. */
8027 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
8028 	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
}
8032 
8033 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8034 {
8035 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8036 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8037 	enum pipe pipe = crtc->pipe;
8038 
8039 	/* pipesrc controls the size that is scaled from, which should
8040 	 * always be the user's requested size.
8041 	 */
8042 	I915_WRITE(PIPESRC(pipe),
8043 		   ((crtc_state->pipe_src_w - 1) << 16) |
8044 		   (crtc_state->pipe_src_h - 1));
8045 }
8046 
8047 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8048 				   struct intel_crtc_state *pipe_config)
8049 {
8050 	struct drm_device *dev = crtc->base.dev;
8051 	struct drm_i915_private *dev_priv = to_i915(dev);
8052 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8053 	u32 tmp;
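
	/*
	 * Each timing register packs two fields as (value - 1): the low 16
	 * bits hold the active/start count and the high 16 bits the
	 * total/end count, hence the +1 on every decode below. E.g.
	 * (illustrative) hdisplay 1920 with htotal 2200 reads back as
	 * 0x0897077f.
	 */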
8054 
8055 	tmp = I915_READ(HTOTAL(cpu_transcoder));
8056 	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8057 	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8058 
8059 	if (!transcoder_is_dsi(cpu_transcoder)) {
8060 		tmp = I915_READ(HBLANK(cpu_transcoder));
8061 		pipe_config->base.adjusted_mode.crtc_hblank_start =
8062 							(tmp & 0xffff) + 1;
8063 		pipe_config->base.adjusted_mode.crtc_hblank_end =
8064 						((tmp >> 16) & 0xffff) + 1;
8065 	}
8066 	tmp = I915_READ(HSYNC(cpu_transcoder));
8067 	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8068 	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8069 
8070 	tmp = I915_READ(VTOTAL(cpu_transcoder));
8071 	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8072 	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8073 
8074 	if (!transcoder_is_dsi(cpu_transcoder)) {
8075 		tmp = I915_READ(VBLANK(cpu_transcoder));
8076 		pipe_config->base.adjusted_mode.crtc_vblank_start =
8077 							(tmp & 0xffff) + 1;
8078 		pipe_config->base.adjusted_mode.crtc_vblank_end =
8079 						((tmp >> 16) & 0xffff) + 1;
8080 	}
8081 	tmp = I915_READ(VSYNC(cpu_transcoder));
8082 	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8083 	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8084 
8085 	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
8086 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8087 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8088 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
8089 	}
8090 }
8091 
8092 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8093 				    struct intel_crtc_state *pipe_config)
8094 {
8095 	struct drm_device *dev = crtc->base.dev;
8096 	struct drm_i915_private *dev_priv = to_i915(dev);
8097 	u32 tmp;
8098 
8099 	tmp = I915_READ(PIPESRC(crtc->pipe));
8100 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8101 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8102 
8103 	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8104 	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8105 }
8106 
8107 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8108 				 struct intel_crtc_state *pipe_config)
8109 {
8110 	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8111 	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8112 	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8113 	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8114 
8115 	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8116 	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8117 	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8118 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8119 
8120 	mode->flags = pipe_config->base.adjusted_mode.flags;
8121 	mode->type = DRM_MODE_TYPE_DRIVER;
8122 
8123 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8124 
8125 	mode->hsync = drm_mode_hsync(mode);
8126 	mode->vrefresh = drm_mode_vrefresh(mode);
8127 	drm_mode_set_name(mode);
8128 }
8129 
8130 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
8131 {
8132 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8133 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8134 	u32 pipeconf;
8135 
8136 	pipeconf = 0;
8137 
8138 	/* we keep both pipes enabled on 830 */
8139 	if (IS_I830(dev_priv))
8140 		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
8141 
8142 	if (crtc_state->double_wide)
8143 		pipeconf |= PIPECONF_DOUBLE_WIDE;
8144 
8145 	/* only g4x and later have fancy bpc/dither controls */
8146 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8147 	    IS_CHERRYVIEW(dev_priv)) {
8148 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
8149 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
8150 			pipeconf |= PIPECONF_DITHER_EN |
8151 				    PIPECONF_DITHER_TYPE_SP;
8152 
8153 		switch (crtc_state->pipe_bpp) {
8154 		case 18:
8155 			pipeconf |= PIPECONF_6BPC;
8156 			break;
8157 		case 24:
8158 			pipeconf |= PIPECONF_8BPC;
8159 			break;
8160 		case 30:
8161 			pipeconf |= PIPECONF_10BPC;
8162 			break;
8163 		default:
8164 			/* Case prevented by intel_choose_pipe_bpp_dither. */
8165 			BUG();
8166 		}
8167 	}
8168 
8169 	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
8170 		if (INTEL_GEN(dev_priv) < 4 ||
8171 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
8172 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8173 		else
8174 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
8175 	} else {
8176 		pipeconf |= PIPECONF_PROGRESSIVE;
8177 	}
8178 
8179 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8180 	     crtc_state->limited_color_range)
8181 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
8182 
8183 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8184 
8185 	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8186 	POSTING_READ(PIPECONF(crtc->pipe));
8187 }
8188 
8189 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8190 				   struct intel_crtc_state *crtc_state)
8191 {
8192 	struct drm_device *dev = crtc->base.dev;
8193 	struct drm_i915_private *dev_priv = to_i915(dev);
8194 	const struct intel_limit *limit;
8195 	int refclk = 48000;
8196 
8197 	memset(&crtc_state->dpll_hw_state, 0,
8198 	       sizeof(crtc_state->dpll_hw_state));
8199 
8200 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8201 		if (intel_panel_use_ssc(dev_priv)) {
8202 			refclk = dev_priv->vbt.lvds_ssc_freq;
8203 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8204 		}
8205 
8206 		limit = &intel_limits_i8xx_lvds;
8207 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8208 		limit = &intel_limits_i8xx_dvo;
8209 	} else {
8210 		limit = &intel_limits_i8xx_dac;
8211 	}
8212 
8213 	if (!crtc_state->clock_set &&
8214 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8215 				 refclk, NULL, &crtc_state->dpll)) {
8216 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8217 		return -EINVAL;
8218 	}
8219 
8220 	i8xx_compute_dpll(crtc, crtc_state, NULL);
8221 
8222 	return 0;
8223 }
8224 
8225 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8226 				  struct intel_crtc_state *crtc_state)
8227 {
8228 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8229 	const struct intel_limit *limit;
8230 	int refclk = 96000;
8231 
8232 	memset(&crtc_state->dpll_hw_state, 0,
8233 	       sizeof(crtc_state->dpll_hw_state));
8234 
8235 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8236 		if (intel_panel_use_ssc(dev_priv)) {
8237 			refclk = dev_priv->vbt.lvds_ssc_freq;
8238 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8239 		}
8240 
8241 		if (intel_is_dual_link_lvds(dev_priv))
8242 			limit = &intel_limits_g4x_dual_channel_lvds;
8243 		else
8244 			limit = &intel_limits_g4x_single_channel_lvds;
8245 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8246 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8247 		limit = &intel_limits_g4x_hdmi;
8248 	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8249 		limit = &intel_limits_g4x_sdvo;
8250 	} else {
		/* Fall back to generic limits for any other outputs */
8252 		limit = &intel_limits_i9xx_sdvo;
8253 	}
8254 
8255 	if (!crtc_state->clock_set &&
8256 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8257 				refclk, NULL, &crtc_state->dpll)) {
8258 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8259 		return -EINVAL;
8260 	}
8261 
8262 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8263 
8264 	return 0;
8265 }
8266 
8267 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8268 				  struct intel_crtc_state *crtc_state)
8269 {
8270 	struct drm_device *dev = crtc->base.dev;
8271 	struct drm_i915_private *dev_priv = to_i915(dev);
8272 	const struct intel_limit *limit;
8273 	int refclk = 96000;
8274 
8275 	memset(&crtc_state->dpll_hw_state, 0,
8276 	       sizeof(crtc_state->dpll_hw_state));
8277 
8278 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8279 		if (intel_panel_use_ssc(dev_priv)) {
8280 			refclk = dev_priv->vbt.lvds_ssc_freq;
8281 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8282 		}
8283 
8284 		limit = &intel_limits_pineview_lvds;
8285 	} else {
8286 		limit = &intel_limits_pineview_sdvo;
8287 	}
8288 
8289 	if (!crtc_state->clock_set &&
8290 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8291 				refclk, NULL, &crtc_state->dpll)) {
8292 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8293 		return -EINVAL;
8294 	}
8295 
8296 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8297 
8298 	return 0;
8299 }
8300 
8301 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8302 				   struct intel_crtc_state *crtc_state)
8303 {
8304 	struct drm_device *dev = crtc->base.dev;
8305 	struct drm_i915_private *dev_priv = to_i915(dev);
8306 	const struct intel_limit *limit;
8307 	int refclk = 96000;
8308 
8309 	memset(&crtc_state->dpll_hw_state, 0,
8310 	       sizeof(crtc_state->dpll_hw_state));
8311 
8312 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8313 		if (intel_panel_use_ssc(dev_priv)) {
8314 			refclk = dev_priv->vbt.lvds_ssc_freq;
8315 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8316 		}
8317 
8318 		limit = &intel_limits_i9xx_lvds;
8319 	} else {
8320 		limit = &intel_limits_i9xx_sdvo;
8321 	}
8322 
8323 	if (!crtc_state->clock_set &&
8324 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8325 				 refclk, NULL, &crtc_state->dpll)) {
8326 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8327 		return -EINVAL;
8328 	}
8329 
8330 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8331 
8332 	return 0;
8333 }
8334 
8335 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8336 				  struct intel_crtc_state *crtc_state)
8337 {
8338 	int refclk = 100000;
8339 	const struct intel_limit *limit = &intel_limits_chv;
8340 
8341 	memset(&crtc_state->dpll_hw_state, 0,
8342 	       sizeof(crtc_state->dpll_hw_state));
8343 
8344 	if (!crtc_state->clock_set &&
8345 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8346 				refclk, NULL, &crtc_state->dpll)) {
8347 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8348 		return -EINVAL;
8349 	}
8350 
8351 	chv_compute_dpll(crtc, crtc_state);
8352 
8353 	return 0;
8354 }
8355 
8356 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8357 				  struct intel_crtc_state *crtc_state)
8358 {
8359 	int refclk = 100000;
8360 	const struct intel_limit *limit = &intel_limits_vlv;
8361 
8362 	memset(&crtc_state->dpll_hw_state, 0,
8363 	       sizeof(crtc_state->dpll_hw_state));
8364 
8365 	if (!crtc_state->clock_set &&
8366 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8367 				refclk, NULL, &crtc_state->dpll)) {
8368 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8369 		return -EINVAL;
8370 	}
8371 
8372 	vlv_compute_dpll(crtc, crtc_state);
8373 
8374 	return 0;
8375 }
8376 
8377 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8378 {
8379 	if (IS_I830(dev_priv))
8380 		return false;
8381 
8382 	return INTEL_GEN(dev_priv) >= 4 ||
8383 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8384 }
8385 
8386 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8387 				 struct intel_crtc_state *pipe_config)
8388 {
8389 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8390 	u32 tmp;
8391 
8392 	if (!i9xx_has_pfit(dev_priv))
8393 		return;
8394 
8395 	tmp = I915_READ(PFIT_CONTROL);
8396 	if (!(tmp & PFIT_ENABLE))
8397 		return;
8398 
8399 	/* Check whether the pfit is attached to our pipe. */
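	/* The pre-gen4 panel fitter is tied to pipe B, hence the pipe check. */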
8400 	if (INTEL_GEN(dev_priv) < 4) {
8401 		if (crtc->pipe != PIPE_B)
8402 			return;
8403 	} else {
8404 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8405 			return;
8406 	}
8407 
8408 	pipe_config->gmch_pfit.control = tmp;
8409 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8410 }
8411 
8412 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8413 			       struct intel_crtc_state *pipe_config)
8414 {
8415 	struct drm_device *dev = crtc->base.dev;
8416 	struct drm_i915_private *dev_priv = to_i915(dev);
8417 	int pipe = pipe_config->cpu_transcoder;
8418 	struct dpll clock;
8419 	u32 mdiv;
8420 	int refclk = 100000;
8421 
8422 	/* In case of DSI, DPLL will not be used */
8423 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8424 		return;
8425 
8426 	vlv_dpio_get(dev_priv);
8427 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8428 	vlv_dpio_put(dev_priv);
8429 
8430 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8431 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8432 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8433 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8434 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8435 
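	/*
	 * vlv_calc_dpll_params() combines these as m = m1 * m2 and
	 * p = p1 * p2 and derives the dot/port clock from
	 * refclk * m / (n * p); see that helper for the exact rounding.
	 */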
8436 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8437 }
8438 
8439 static void
8440 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8441 			      struct intel_initial_plane_config *plane_config)
8442 {
8443 	struct drm_device *dev = crtc->base.dev;
8444 	struct drm_i915_private *dev_priv = to_i915(dev);
8445 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8446 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8447 	enum pipe pipe;
8448 	u32 val, base, offset;
8449 	int fourcc, pixel_format;
8450 	unsigned int aligned_height;
8451 	struct drm_framebuffer *fb;
8452 	struct intel_framebuffer *intel_fb;
8453 
8454 	if (!plane->get_hw_state(plane, &pipe))
8455 		return;
8456 
8457 	WARN_ON(pipe != crtc->pipe);
8458 
8459 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8460 	if (!intel_fb) {
8461 		DRM_DEBUG_KMS("failed to alloc fb\n");
8462 		return;
8463 	}
8464 
8465 	fb = &intel_fb->base;
8466 
8467 	fb->dev = dev;
8468 
8469 	val = I915_READ(DSPCNTR(i9xx_plane));
8470 
8471 	if (INTEL_GEN(dev_priv) >= 4) {
8472 		if (val & DISPPLANE_TILED) {
8473 			plane_config->tiling = I915_TILING_X;
8474 			fb->modifier = I915_FORMAT_MOD_X_TILED;
8475 		}
8476 
8477 		if (val & DISPPLANE_ROTATE_180)
8478 			plane_config->rotation = DRM_MODE_ROTATE_180;
8479 	}
8480 
8481 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8482 	    val & DISPPLANE_MIRROR)
8483 		plane_config->rotation |= DRM_MODE_REFLECT_X;
8484 
8485 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8486 	fourcc = i9xx_format_to_fourcc(pixel_format);
8487 	fb->format = drm_format_info(fourcc);
8488 
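	/* DSPSURF is 4KiB aligned, hence the masking of its low 12 bits below. */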
8489 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8490 		offset = I915_READ(DSPOFFSET(i9xx_plane));
8491 		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8492 	} else if (INTEL_GEN(dev_priv) >= 4) {
8493 		if (plane_config->tiling)
8494 			offset = I915_READ(DSPTILEOFF(i9xx_plane));
8495 		else
8496 			offset = I915_READ(DSPLINOFF(i9xx_plane));
8497 		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8498 	} else {
8499 		base = I915_READ(DSPADDR(i9xx_plane));
8500 	}
8501 	plane_config->base = base;
8502 
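	/* PIPESRC stores width/height minus one, hence the +1 decodes below. */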
8503 	val = I915_READ(PIPESRC(pipe));
8504 	fb->width = ((val >> 16) & 0xfff) + 1;
8505 	fb->height = ((val >> 0) & 0xfff) + 1;
8506 
8507 	val = I915_READ(DSPSTRIDE(i9xx_plane));
8508 	fb->pitches[0] = val & 0xffffffc0;
8509 
8510 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
8511 
8512 	plane_config->size = fb->pitches[0] * aligned_height;
8513 
8514 	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8515 		      crtc->base.name, plane->base.name, fb->width, fb->height,
8516 		      fb->format->cpp[0] * 8, base, fb->pitches[0],
8517 		      plane_config->size);
8518 
8519 	plane_config->fb = intel_fb;
8520 }
8521 
8522 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8523 			       struct intel_crtc_state *pipe_config)
8524 {
8525 	struct drm_device *dev = crtc->base.dev;
8526 	struct drm_i915_private *dev_priv = to_i915(dev);
8527 	int pipe = pipe_config->cpu_transcoder;
8528 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8529 	struct dpll clock;
8530 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8531 	int refclk = 100000;
8532 
8533 	/* In case of DSI, DPLL will not be used */
8534 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8535 		return;
8536 
8537 	vlv_dpio_get(dev_priv);
8538 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8539 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8540 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8541 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8542 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8543 	vlv_dpio_put(dev_priv);
8544 
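	/*
	 * M2 is a fixed-point value here: the integer part from PLL_DW0 is
	 * shifted up by 22 bits and the 22-bit fraction from PLL_DW2 is
	 * ORed in when the fractional divider is enabled.
	 */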
8545 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8546 	clock.m2 = (pll_dw0 & 0xff) << 22;
8547 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8548 		clock.m2 |= pll_dw2 & 0x3fffff;
8549 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8550 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8551 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8552 
8553 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8554 }
8555 
8556 static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8557 					struct intel_crtc_state *pipe_config)
8558 {
8559 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8560 	enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8561 
8562 	pipe_config->lspcon_downsampling = false;
8563 
8564 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8565 		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8566 
8567 		if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8568 			bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8569 			bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8570 
8571 			if (ycbcr420_enabled) {
8572 				/* We support 4:2:0 in full blend mode only */
8573 				if (!blend)
8574 					output = INTEL_OUTPUT_FORMAT_INVALID;
8575 				else if (!(IS_GEMINILAKE(dev_priv) ||
8576 					   INTEL_GEN(dev_priv) >= 10))
8577 					output = INTEL_OUTPUT_FORMAT_INVALID;
8578 				else
8579 					output = INTEL_OUTPUT_FORMAT_YCBCR420;
8580 			} else {
8581 				/*
8582 				 * Currently there is no interface defined to
8583 				 * check user preference between RGB/YCBCR444
8584 				 * or YCBCR420. So the only possible case for
8585 				 * YCBCR444 usage is driving YCBCR420 output
8586 				 * with LSPCON, when the pipe is configured for
8587 				 * YCBCR444 output and the LSPCON takes care of
8588 				 * downsampling it.
8589 				 */
8590 				pipe_config->lspcon_downsampling = true;
8591 				output = INTEL_OUTPUT_FORMAT_YCBCR444;
8592 			}
8593 		}
8594 	}
8595 
8596 	pipe_config->output_format = output;
8597 }
8598 
8599 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8600 {
8601 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8602 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8603 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8604 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8605 	u32 tmp;
8606 
8607 	tmp = I915_READ(DSPCNTR(i9xx_plane));
8608 
8609 	if (tmp & DISPPLANE_GAMMA_ENABLE)
8610 		crtc_state->gamma_enable = true;
8611 
8612 	if (!HAS_GMCH(dev_priv) &&
8613 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
8614 		crtc_state->csc_enable = true;
8615 }
8616 
8617 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8618 				 struct intel_crtc_state *pipe_config)
8619 {
8620 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8621 	enum intel_display_power_domain power_domain;
8622 	intel_wakeref_t wakeref;
8623 	u32 tmp;
8624 	bool ret;
8625 
8626 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8627 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8628 	if (!wakeref)
8629 		return false;
8630 
8631 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
8632 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8633 	pipe_config->shared_dpll = NULL;
8634 
8635 	ret = false;
8636 
8637 	tmp = I915_READ(PIPECONF(crtc->pipe));
8638 	if (!(tmp & PIPECONF_ENABLE))
8639 		goto out;
8640 
8641 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8642 	    IS_CHERRYVIEW(dev_priv)) {
8643 		switch (tmp & PIPECONF_BPC_MASK) {
8644 		case PIPECONF_6BPC:
8645 			pipe_config->pipe_bpp = 18;
8646 			break;
8647 		case PIPECONF_8BPC:
8648 			pipe_config->pipe_bpp = 24;
8649 			break;
8650 		case PIPECONF_10BPC:
8651 			pipe_config->pipe_bpp = 30;
8652 			break;
8653 		default:
8654 			break;
8655 		}
8656 	}
8657 
8658 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
8659 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8660 		pipe_config->limited_color_range = true;
8661 
8662 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8663 		PIPECONF_GAMMA_MODE_SHIFT;
8664 
8665 	if (IS_CHERRYVIEW(dev_priv))
8666 		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8667 
8668 	i9xx_get_pipe_color_config(pipe_config);
8669 	intel_color_get_config(pipe_config);
8670 
8671 	if (INTEL_GEN(dev_priv) < 4)
8672 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8673 
8674 	intel_get_pipe_timings(crtc, pipe_config);
8675 	intel_get_pipe_src_size(crtc, pipe_config);
8676 
8677 	i9xx_get_pfit_config(crtc, pipe_config);
8678 
8679 	if (INTEL_GEN(dev_priv) >= 4) {
8680 		/* No way to read it out on pipes B and C */
8681 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
8682 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
8683 		else
8684 			tmp = I915_READ(DPLL_MD(crtc->pipe));
8685 		pipe_config->pixel_multiplier =
8686 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8687 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8688 		pipe_config->dpll_hw_state.dpll_md = tmp;
8689 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8690 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8691 		tmp = I915_READ(DPLL(crtc->pipe));
8692 		pipe_config->pixel_multiplier =
8693 			((tmp & SDVO_MULTIPLIER_MASK)
8694 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8695 	} else {
8696 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8697 		 * port and will be fixed up in the encoder->get_config
8698 		 * function. */
8699 		pipe_config->pixel_multiplier = 1;
8700 	}
8701 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8702 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8703 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8704 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8705 	} else {
8706 		/* Mask out read-only status bits. */
8707 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8708 						     DPLL_PORTC_READY_MASK |
8709 						     DPLL_PORTB_READY_MASK);
8710 	}
8711 
8712 	if (IS_CHERRYVIEW(dev_priv))
8713 		chv_crtc_clock_get(crtc, pipe_config);
8714 	else if (IS_VALLEYVIEW(dev_priv))
8715 		vlv_crtc_clock_get(crtc, pipe_config);
8716 	else
8717 		i9xx_crtc_clock_get(crtc, pipe_config);
8718 
8719 	/*
8720 	 * Normally the dotclock is filled in by the encoder .get_config()
8721 	 * but in case the pipe is enabled w/o any ports we need a sane
8722 	 * default.
8723 	 */
8724 	pipe_config->base.adjusted_mode.crtc_clock =
8725 		pipe_config->port_clock / pipe_config->pixel_multiplier;
8726 
8727 	ret = true;
8728 
8729 out:
8730 	intel_display_power_put(dev_priv, power_domain, wakeref);
8731 
8732 	return ret;
8733 }
8734 
8735 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
8736 {
8737 	struct intel_encoder *encoder;
8738 	int i;
8739 	u32 val, final;
8740 	bool has_lvds = false;
8741 	bool has_cpu_edp = false;
8742 	bool has_panel = false;
8743 	bool has_ck505 = false;
8744 	bool can_ssc = false;
8745 	bool using_ssc_source = false;
8746 
8747 	/* We need to take the global config into account */
8748 	for_each_intel_encoder(&dev_priv->drm, encoder) {
8749 		switch (encoder->type) {
8750 		case INTEL_OUTPUT_LVDS:
8751 			has_panel = true;
8752 			has_lvds = true;
8753 			break;
8754 		case INTEL_OUTPUT_EDP:
8755 			has_panel = true;
8756 			if (encoder->port == PORT_A)
8757 				has_cpu_edp = true;
8758 			break;
8759 		default:
8760 			break;
8761 		}
8762 	}
8763 
8764 	if (HAS_PCH_IBX(dev_priv)) {
8765 		has_ck505 = dev_priv->vbt.display_clock_mode;
8766 		can_ssc = has_ck505;
8767 	} else {
8768 		has_ck505 = false;
8769 		can_ssc = true;
8770 	}
8771 
8772 	/* Check if any DPLLs are using the SSC source */
8773 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8774 		u32 temp = I915_READ(PCH_DPLL(i));
8775 
8776 		if (!(temp & DPLL_VCO_ENABLE))
8777 			continue;
8778 
8779 		if ((temp & PLL_REF_INPUT_MASK) ==
8780 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8781 			using_ssc_source = true;
8782 			break;
8783 		}
8784 	}
8785 
8786 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8787 		      has_panel, has_lvds, has_ck505, using_ssc_source);
8788 
8789 	/* Ironlake: try to set up the display reference clock before
8790 	 * enabling the DPLLs. This is only under the driver's control
8791 	 * after the PCH B stepping; earlier chipset steppings ignore
8792 	 * this setting.
8793 	 */
8794 	val = I915_READ(PCH_DREF_CONTROL);
8795 
8796 	/* As we must carefully and slowly disable/enable each source in turn,
8797 	 * compute the final state we want first and check if we need to
8798 	 * make any changes at all.
8799 	 */
8800 	final = val;
8801 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8802 	if (has_ck505)
8803 		final |= DREF_NONSPREAD_CK505_ENABLE;
8804 	else
8805 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8806 
8807 	final &= ~DREF_SSC_SOURCE_MASK;
8808 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8809 	final &= ~DREF_SSC1_ENABLE;
8810 
8811 	if (has_panel) {
8812 		final |= DREF_SSC_SOURCE_ENABLE;
8813 
8814 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8815 			final |= DREF_SSC1_ENABLE;
8816 
8817 		if (has_cpu_edp) {
8818 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8819 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8820 			else
8821 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8822 		} else
8823 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8824 	} else if (using_ssc_source) {
8825 		final |= DREF_SSC_SOURCE_ENABLE;
8826 		final |= DREF_SSC1_ENABLE;
8827 	}
8828 
8829 	if (final == val)
8830 		return;
8831 
8832 	/* Always enable nonspread source */
8833 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8834 
8835 	if (has_ck505)
8836 		val |= DREF_NONSPREAD_CK505_ENABLE;
8837 	else
8838 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8839 
8840 	if (has_panel) {
8841 		val &= ~DREF_SSC_SOURCE_MASK;
8842 		val |= DREF_SSC_SOURCE_ENABLE;
8843 
8844 		/* SSC must be turned on before enabling the CPU output */
8845 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8846 			DRM_DEBUG_KMS("Using SSC on panel\n");
8847 			val |= DREF_SSC1_ENABLE;
8848 		} else
8849 			val &= ~DREF_SSC1_ENABLE;
8850 
8851 		/* Get SSC going before enabling the outputs */
8852 		I915_WRITE(PCH_DREF_CONTROL, val);
8853 		POSTING_READ(PCH_DREF_CONTROL);
8854 		udelay(200);
8855 
8856 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8857 
8858 		/* Enable CPU source on CPU attached eDP */
8859 		if (has_cpu_edp) {
8860 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8861 				DRM_DEBUG_KMS("Using SSC on eDP\n");
8862 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8863 			} else
8864 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8865 		} else
8866 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8867 
8868 		I915_WRITE(PCH_DREF_CONTROL, val);
8869 		POSTING_READ(PCH_DREF_CONTROL);
8870 		udelay(200);
8871 	} else {
8872 		DRM_DEBUG_KMS("Disabling CPU source output\n");
8873 
8874 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8875 
8876 		/* Turn off CPU output */
8877 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8878 
8879 		I915_WRITE(PCH_DREF_CONTROL, val);
8880 		POSTING_READ(PCH_DREF_CONTROL);
8881 		udelay(200);
8882 
8883 		if (!using_ssc_source) {
8884 			DRM_DEBUG_KMS("Disabling SSC source\n");
8885 
8886 			/* Turn off the SSC source */
8887 			val &= ~DREF_SSC_SOURCE_MASK;
8888 			val |= DREF_SSC_SOURCE_DISABLE;
8889 
8890 			/* Turn off SSC1 */
8891 			val &= ~DREF_SSC1_ENABLE;
8892 
8893 			I915_WRITE(PCH_DREF_CONTROL, val);
8894 			POSTING_READ(PCH_DREF_CONTROL);
8895 			udelay(200);
8896 		}
8897 	}
8898 
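	/*
	 * The stepwise reprogramming above must land exactly on the state
	 * precomputed in 'final'.
	 */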
8899 	BUG_ON(val != final);
8900 }
8901 
8902 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8903 {
8904 	u32 tmp;
8905 
8906 	tmp = I915_READ(SOUTH_CHICKEN2);
8907 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8908 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8909 
8910 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8911 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8912 		DRM_ERROR("FDI mPHY reset assert timeout\n");
8913 
8914 	tmp = I915_READ(SOUTH_CHICKEN2);
8915 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8916 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8917 
8918 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8919 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8920 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8921 }
8922 
8923 /* WaMPhyProgramming:hsw */
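/*
 * The SBI writes below come in 0x20xx/0x21xx pairs, presumably one per
 * mPHY channel; the magic values are workaround data and are not
 * decoded further here.
 */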
8924 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8925 {
8926 	u32 tmp;
8927 
8928 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8929 	tmp &= ~(0xFF << 24);
8930 	tmp |= (0x12 << 24);
8931 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8932 
8933 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8934 	tmp |= (1 << 11);
8935 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8936 
8937 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8938 	tmp |= (1 << 11);
8939 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8940 
8941 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8942 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8943 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8944 
8945 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8946 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8947 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8948 
8949 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8950 	tmp &= ~(7 << 13);
8951 	tmp |= (5 << 13);
8952 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8953 
8954 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8955 	tmp &= ~(7 << 13);
8956 	tmp |= (5 << 13);
8957 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8958 
8959 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8960 	tmp &= ~0xFF;
8961 	tmp |= 0x1C;
8962 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8963 
8964 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8965 	tmp &= ~0xFF;
8966 	tmp |= 0x1C;
8967 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8968 
8969 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8970 	tmp &= ~(0xFF << 16);
8971 	tmp |= (0x1C << 16);
8972 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8973 
8974 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8975 	tmp &= ~(0xFF << 16);
8976 	tmp |= (0x1C << 16);
8977 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8978 
8979 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8980 	tmp |= (1 << 27);
8981 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8982 
8983 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8984 	tmp |= (1 << 27);
8985 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8986 
8987 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8988 	tmp &= ~(0xF << 28);
8989 	tmp |= (4 << 28);
8990 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8991 
8992 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8993 	tmp &= ~(0xF << 28);
8994 	tmp |= (4 << 28);
8995 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8996 }
8997 
8998 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8999  * Programming" based on the parameters passed:
9000  * - Sequence to enable CLKOUT_DP
9001  * - Sequence to enable CLKOUT_DP without spread
9002  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
9003  */
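/*
 * Mapping of the parameters to those sequences: with_spread selects
 * between the spread and no-spread variants, and with_fdi additionally
 * programs the PCH FDI mPHY (only valid with spread enabled, which the
 * WARNs below enforce).
 */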
9004 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9005 				 bool with_spread, bool with_fdi)
9006 {
9007 	u32 reg, tmp;
9008 
9009 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9010 		with_spread = true;
9011 	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9012 	    with_fdi, "LP PCH doesn't have FDI\n"))
9013 		with_fdi = false;
9014 
9015 	mutex_lock(&dev_priv->sb_lock);
9016 
9017 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9018 	tmp &= ~SBI_SSCCTL_DISABLE;
9019 	tmp |= SBI_SSCCTL_PATHALT;
9020 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9021 
9022 	udelay(24);
9023 
9024 	if (with_spread) {
9025 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9026 		tmp &= ~SBI_SSCCTL_PATHALT;
9027 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9028 
9029 		if (with_fdi) {
9030 			lpt_reset_fdi_mphy(dev_priv);
9031 			lpt_program_fdi_mphy(dev_priv);
9032 		}
9033 	}
9034 
9035 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9036 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9037 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9038 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9039 
9040 	mutex_unlock(&dev_priv->sb_lock);
9041 }
9042 
9043 /* Sequence to disable CLKOUT_DP */
9044 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
9045 {
9046 	u32 reg, tmp;
9047 
9048 	mutex_lock(&dev_priv->sb_lock);
9049 
9050 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
9051 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9052 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9053 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9054 
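	/*
	 * The disable sequence below first routes the clock through PATHALT
	 * (if not already) before setting SSCCTL_DISABLE.
	 */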
9055 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9056 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
9057 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
9058 			tmp |= SBI_SSCCTL_PATHALT;
9059 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9060 			udelay(32);
9061 		}
9062 		tmp |= SBI_SSCCTL_DISABLE;
9063 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9064 	}
9065 
9066 	mutex_unlock(&dev_priv->sb_lock);
9067 }
9068 
9069 #define BEND_IDX(steps) ((50 + (steps)) / 5)
9070 
9071 static const u16 sscdivintphase[] = {
9072 	[BEND_IDX( 50)] = 0x3B23,
9073 	[BEND_IDX( 45)] = 0x3B23,
9074 	[BEND_IDX( 40)] = 0x3C23,
9075 	[BEND_IDX( 35)] = 0x3C23,
9076 	[BEND_IDX( 30)] = 0x3D23,
9077 	[BEND_IDX( 25)] = 0x3D23,
9078 	[BEND_IDX( 20)] = 0x3E23,
9079 	[BEND_IDX( 15)] = 0x3E23,
9080 	[BEND_IDX( 10)] = 0x3F23,
9081 	[BEND_IDX(  5)] = 0x3F23,
9082 	[BEND_IDX(  0)] = 0x0025,
9083 	[BEND_IDX( -5)] = 0x0025,
9084 	[BEND_IDX(-10)] = 0x0125,
9085 	[BEND_IDX(-15)] = 0x0125,
9086 	[BEND_IDX(-20)] = 0x0225,
9087 	[BEND_IDX(-25)] = 0x0225,
9088 	[BEND_IDX(-30)] = 0x0325,
9089 	[BEND_IDX(-35)] = 0x0325,
9090 	[BEND_IDX(-40)] = 0x0425,
9091 	[BEND_IDX(-45)] = 0x0425,
9092 	[BEND_IDX(-50)] = 0x0525,
9093 };
9094 
9095 /*
9096  * Bend CLKOUT_DP
9097  * steps -50 to 50 inclusive, in steps of 5
9098  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135 MHz)
9099  * change in clock period = -(steps / 10) * 5.787 ps
9100  */
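/*
 * Worked example: BEND_IDX(-50) = 0, BEND_IDX(0) = 10 (0x0025, no bend)
 * and BEND_IDX(50) = 20, so sscdivintphase[] spans 21 entries. Odd
 * multiples of 5 reuse the value of their even neighbour and are
 * presumably realized by the SSCDITHPHASE dithering selected below for
 * steps % 10 != 0.
 */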
9101 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9102 {
9103 	u32 tmp;
9104 	int idx = BEND_IDX(steps);
9105 
9106 	if (WARN_ON(steps % 5 != 0))
9107 		return;
9108 
9109 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9110 		return;
9111 
9112 	mutex_lock(&dev_priv->sb_lock);
9113 
9114 	if (steps % 10 != 0)
9115 		tmp = 0xAAAAAAAB;
9116 	else
9117 		tmp = 0x00000000;
9118 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9119 
9120 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9121 	tmp &= 0xffff0000;
9122 	tmp |= sscdivintphase[idx];
9123 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9124 
9125 	mutex_unlock(&dev_priv->sb_lock);
9126 }
9127 
9128 #undef BEND_IDX
9129 
9130 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9131 {
9132 	u32 fuse_strap = I915_READ(FUSE_STRAP);
9133 	u32 ctl = I915_READ(SPLL_CTL);
9134 
9135 	if ((ctl & SPLL_PLL_ENABLE) == 0)
9136 		return false;
9137 
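	/*
	 * A muxed reference resolves to the PCH SSC clock when the CPU SSC
	 * enable fuse strap is not set.
	 */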
9138 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9139 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9140 		return true;
9141 
9142 	if (IS_BROADWELL(dev_priv) &&
9143 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9144 		return true;
9145 
9146 	return false;
9147 }
9148 
9149 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9150 			       enum intel_dpll_id id)
9151 {
9152 	u32 fuse_strap = I915_READ(FUSE_STRAP);
9153 	u32 ctl = I915_READ(WRPLL_CTL(id));
9154 
9155 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
9156 		return false;
9157 
9158 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9159 		return true;
9160 
9161 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9162 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9163 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9164 		return true;
9165 
9166 	return false;
9167 }
9168 
9169 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9170 {
9171 	struct intel_encoder *encoder;
9172 	bool pch_ssc_in_use = false;
9173 	bool has_fdi = false;
9174 
9175 	for_each_intel_encoder(&dev_priv->drm, encoder) {
9176 		switch (encoder->type) {
9177 		case INTEL_OUTPUT_ANALOG:
9178 			has_fdi = true;
9179 			break;
9180 		default:
9181 			break;
9182 		}
9183 	}
9184 
9185 	/*
9186 	 * The BIOS may have decided to use the PCH SSC
9187 	 * reference so we must not disable it until the
9188 	 * relevant PLLs have stopped relying on it. We'll
9189 	 * just leave the PCH SSC reference enabled in case
9190 	 * any active PLL is using it. It will get disabled
9191 	 * after runtime suspend if we don't have FDI.
9192 	 *
9193 	 * TODO: Move the whole reference clock handling
9194 	 * to the modeset sequence proper so that we can
9195 	 * actually enable/disable/reconfigure these things
9196 	 * safely. To do that we need to introduce a real
9197 	 * clock hierarchy. That would also allow us to do
9198 	 * clock bending finally.
9199 	 */
9200 	if (spll_uses_pch_ssc(dev_priv)) {
9201 		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
9202 		pch_ssc_in_use = true;
9203 	}
9204 
9205 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9206 		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
9207 		pch_ssc_in_use = true;
9208 	}
9209 
9210 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9211 		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
9212 		pch_ssc_in_use = true;
9213 	}
9214 
9215 	if (pch_ssc_in_use)
9216 		return;
9217 
9218 	if (has_fdi) {
9219 		lpt_bend_clkout_dp(dev_priv, 0);
9220 		lpt_enable_clkout_dp(dev_priv, true, true);
9221 	} else {
9222 		lpt_disable_clkout_dp(dev_priv);
9223 	}
9224 }
9225 
9226 /*
9227  * Initialize reference clocks when the driver loads
9228  */
9229 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9230 {
9231 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9232 		ironlake_init_pch_refclk(dev_priv);
9233 	else if (HAS_PCH_LPT(dev_priv))
9234 		lpt_init_pch_refclk(dev_priv);
9235 }
9236 
9237 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9238 {
9239 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9240 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9241 	enum pipe pipe = crtc->pipe;
9242 	u32 val;
9243 
9244 	val = 0;
9245 
9246 	switch (crtc_state->pipe_bpp) {
9247 	case 18:
9248 		val |= PIPECONF_6BPC;
9249 		break;
9250 	case 24:
9251 		val |= PIPECONF_8BPC;
9252 		break;
9253 	case 30:
9254 		val |= PIPECONF_10BPC;
9255 		break;
9256 	case 36:
9257 		val |= PIPECONF_12BPC;
9258 		break;
9259 	default:
9260 		/* Case prevented by intel_choose_pipe_bpp_dither. */
9261 		BUG();
9262 	}
9263 
9264 	if (crtc_state->dither)
9265 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9266 
9267 	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9268 		val |= PIPECONF_INTERLACED_ILK;
9269 	else
9270 		val |= PIPECONF_PROGRESSIVE;
9271 
9272 	if (crtc_state->limited_color_range)
9273 		val |= PIPECONF_COLOR_RANGE_SELECT;
9274 
9275 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9276 
9277 	I915_WRITE(PIPECONF(pipe), val);
9278 	POSTING_READ(PIPECONF(pipe));
9279 }
9280 
9281 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9282 {
9283 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9284 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9285 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9286 	u32 val = 0;
9287 
9288 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
9289 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9290 
9291 	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9292 		val |= PIPECONF_INTERLACED_ILK;
9293 	else
9294 		val |= PIPECONF_PROGRESSIVE;
9295 
9296 	I915_WRITE(PIPECONF(cpu_transcoder), val);
9297 	POSTING_READ(PIPECONF(cpu_transcoder));
9298 }
9299 
9300 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
9301 {
9302 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9303 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9304 	u32 val = 0;
9305 
9306 	switch (crtc_state->pipe_bpp) {
9307 	case 18:
9308 		val |= PIPEMISC_DITHER_6_BPC;
9309 		break;
9310 	case 24:
9311 		val |= PIPEMISC_DITHER_8_BPC;
9312 		break;
9313 	case 30:
9314 		val |= PIPEMISC_DITHER_10_BPC;
9315 		break;
9316 	case 36:
9317 		val |= PIPEMISC_DITHER_12_BPC;
9318 		break;
9319 	default:
9320 		MISSING_CASE(crtc_state->pipe_bpp);
9321 		break;
9322 	}
9323 
9324 	if (crtc_state->dither)
9325 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
9326 
9327 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9328 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9329 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
9330 
9331 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9332 		val |= PIPEMISC_YUV420_ENABLE |
9333 			PIPEMISC_YUV420_MODE_FULL_BLEND;
9334 
9335 	if (INTEL_GEN(dev_priv) >= 11 &&
9336 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9337 					   BIT(PLANE_CURSOR))) == 0)
9338 		val |= PIPEMISC_HDR_MODE_PRECISION;
9339 
9340 	I915_WRITE(PIPEMISC(crtc->pipe), val);
9341 }
9342 
9343 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9344 {
9345 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9346 	u32 tmp;
9347 
9348 	tmp = I915_READ(PIPEMISC(crtc->pipe));
9349 
9350 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9351 	case PIPEMISC_DITHER_6_BPC:
9352 		return 18;
9353 	case PIPEMISC_DITHER_8_BPC:
9354 		return 24;
9355 	case PIPEMISC_DITHER_10_BPC:
9356 		return 30;
9357 	case PIPEMISC_DITHER_12_BPC:
9358 		return 36;
9359 	default:
9360 		MISSING_CASE(tmp);
9361 		return 0;
9362 	}
9363 }
9364 
9365 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9366 {
9367 	/*
9368 	 * Account for spread spectrum to avoid
9369 	 * oversubscribing the link. Max center spread
9370 	 * is 2.5%; use 5% for safety's sake.
9371 	 */
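	/*
	 * Example: a 148500 kHz dotclock at 24 bpp on a 270000 kHz link
	 * needs DIV_ROUND_UP(148500 * 24 * 21 / 20, 270000 * 8) = 2 lanes.
	 */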
9372 	u32 bps = target_clock * bpp * 21 / 20;
9373 	return DIV_ROUND_UP(bps, link_bw * 8);
9374 }
9375 
9376 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9377 {
9378 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9379 }
9380 
9381 static void ironlake_compute_dpll(struct intel_crtc *crtc,
9382 				  struct intel_crtc_state *crtc_state,
9383 				  struct dpll *reduced_clock)
9384 {
9385 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9386 	u32 dpll, fp, fp2;
9387 	int factor;
9388 
9389 	/* Enable autotuning of the PLL clock (if permissible) */
9390 	factor = 21;
9391 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9392 		if ((intel_panel_use_ssc(dev_priv) &&
9393 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
9394 		    (HAS_PCH_IBX(dev_priv) &&
9395 		     intel_is_dual_link_lvds(dev_priv)))
9396 			factor = 25;
9397 	} else if (crtc_state->sdvo_tv_clock) {
9398 		factor = 20;
9399 	}
9400 
9401 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9402 
9403 	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
9404 		fp |= FP_CB_TUNE;
9405 
9406 	if (reduced_clock) {
9407 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
9408 
9409 		if (reduced_clock->m < factor * reduced_clock->n)
9410 			fp2 |= FP_CB_TUNE;
9411 	} else {
9412 		fp2 = fp;
9413 	}
9414 
9415 	dpll = 0;
9416 
9417 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
9418 		dpll |= DPLLB_MODE_LVDS;
9419 	else
9420 		dpll |= DPLLB_MODE_DAC_SERIAL;
9421 
9422 	dpll |= (crtc_state->pixel_multiplier - 1)
9423 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9424 
9425 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9426 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
9427 		dpll |= DPLL_SDVO_HIGH_SPEED;
9428 
9429 	if (intel_crtc_has_dp_encoder(crtc_state))
9430 		dpll |= DPLL_SDVO_HIGH_SPEED;
9431 
9432 	/*
9433 	 * The high speed IO clock is only really required for
9434 	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9435 	 * possible to share the DPLL between CRT and HDMI. Enabling
9436 	 * the clock needlessly does no real harm, except use up a
9437 	 * bit of power potentially.
9438 	 *
9439 	 * We'll limit this to IVB with 3 pipes, since it has only two
9440 	 * DPLLs and so DPLL sharing is the only way to get three pipes
9441 	 * driving PCH ports at the same time. On SNB we could do this,
9442 	 * and potentially avoid enabling the second DPLL, but it's not
9443 	 * clear if it's a win or loss power wise. No point in doing
9444 	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9445 	 */
9446 	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
9447 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9448 		dpll |= DPLL_SDVO_HIGH_SPEED;
9449 
9450 	/* compute bitmask from p1 value */
9451 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9452 	/* also FPA1 */
9453 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9454 
9455 	switch (crtc_state->dpll.p2) {
9456 	case 5:
9457 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9458 		break;
9459 	case 7:
9460 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9461 		break;
9462 	case 10:
9463 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9464 		break;
9465 	case 14:
9466 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9467 		break;
9468 	}
9469 
9470 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9471 	    intel_panel_use_ssc(dev_priv))
9472 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9473 	else
9474 		dpll |= PLL_REF_INPUT_DREFCLK;
9475 
9476 	dpll |= DPLL_VCO_ENABLE;
9477 
9478 	crtc_state->dpll_hw_state.dpll = dpll;
9479 	crtc_state->dpll_hw_state.fp0 = fp;
9480 	crtc_state->dpll_hw_state.fp1 = fp2;
9481 }
9482 
9483 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9484 				       struct intel_crtc_state *crtc_state)
9485 {
9486 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9487 	const struct intel_limit *limit;
9488 	int refclk = 120000;
9489 
9490 	memset(&crtc_state->dpll_hw_state, 0,
9491 	       sizeof(crtc_state->dpll_hw_state));
9492 
9493 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9494 	if (!crtc_state->has_pch_encoder)
9495 		return 0;
9496 
9497 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9498 		if (intel_panel_use_ssc(dev_priv)) {
9499 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9500 				      dev_priv->vbt.lvds_ssc_freq);
9501 			refclk = dev_priv->vbt.lvds_ssc_freq;
9502 		}
9503 
9504 		if (intel_is_dual_link_lvds(dev_priv)) {
9505 			if (refclk == 100000)
9506 				limit = &intel_limits_ironlake_dual_lvds_100m;
9507 			else
9508 				limit = &intel_limits_ironlake_dual_lvds;
9509 		} else {
9510 			if (refclk == 100000)
9511 				limit = &intel_limits_ironlake_single_lvds_100m;
9512 			else
9513 				limit = &intel_limits_ironlake_single_lvds;
9514 		}
9515 	} else {
9516 		limit = &intel_limits_ironlake_dac;
9517 	}
9518 
9519 	if (!crtc_state->clock_set &&
9520 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9521 				refclk, NULL, &crtc_state->dpll)) {
9522 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
9523 		return -EINVAL;
9524 	}
9525 
9526 	ironlake_compute_dpll(crtc, crtc_state, NULL);
9527 
9528 	if (!intel_get_shared_dpll(crtc_state, NULL)) {
9529 		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9530 			      pipe_name(crtc->pipe));
9531 		return -EINVAL;
9532 	}
9533 
9534 	return 0;
9535 }
9536 
9537 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9538 					 struct intel_link_m_n *m_n)
9539 {
9540 	struct drm_device *dev = crtc->base.dev;
9541 	struct drm_i915_private *dev_priv = to_i915(dev);
9542 	enum pipe pipe = crtc->pipe;
9543 
9544 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9545 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9546 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9547 		& ~TU_SIZE_MASK;
9548 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
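	/* The TU size field is stored minus one, hence the +1 below. */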
9549 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9550 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9551 }
9552 
9553 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9554 					 enum transcoder transcoder,
9555 					 struct intel_link_m_n *m_n,
9556 					 struct intel_link_m_n *m2_n2)
9557 {
9558 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9559 	enum pipe pipe = crtc->pipe;
9560 
9561 	if (INTEL_GEN(dev_priv) >= 5) {
9562 		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9563 		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9564 		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9565 			& ~TU_SIZE_MASK;
9566 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9567 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9568 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9569 
9570 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
9571 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9572 			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9573 			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9574 					& ~TU_SIZE_MASK;
9575 			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9576 			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9577 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9578 		}
9579 	} else {
9580 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9581 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9582 		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9583 			& ~TU_SIZE_MASK;
9584 		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9585 		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9586 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9587 	}
9588 }
9589 
9590 void intel_dp_get_m_n(struct intel_crtc *crtc,
9591 		      struct intel_crtc_state *pipe_config)
9592 {
9593 	if (pipe_config->has_pch_encoder)
9594 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9595 	else
9596 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9597 					     &pipe_config->dp_m_n,
9598 					     &pipe_config->dp_m2_n2);
9599 }
9600 
9601 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9602 					struct intel_crtc_state *pipe_config)
9603 {
9604 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9605 				     &pipe_config->fdi_m_n, NULL);
9606 }
9607 
9608 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9609 				    struct intel_crtc_state *pipe_config)
9610 {
9611 	struct drm_device *dev = crtc->base.dev;
9612 	struct drm_i915_private *dev_priv = to_i915(dev);
9613 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9614 	u32 ps_ctrl = 0;
9615 	int id = -1;
9616 	int i;
9617 
9618 	/* find scaler attached to this pipe */
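	/*
	 * An enabled scaler with PS_PLANE_SEL zero is bound to the pipe
	 * itself rather than to a plane.
	 */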
9619 	for (i = 0; i < crtc->num_scalers; i++) {
9620 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9621 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9622 			id = i;
9623 			pipe_config->pch_pfit.enabled = true;
9624 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9625 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9626 			scaler_state->scalers[i].in_use = true;
9627 			break;
9628 		}
9629 	}
9630 
9631 	scaler_state->scaler_id = id;
9632 	if (id >= 0) {
9633 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9634 	} else {
9635 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9636 	}
9637 }
9638 
9639 static void
9640 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9641 				 struct intel_initial_plane_config *plane_config)
9642 {
9643 	struct drm_device *dev = crtc->base.dev;
9644 	struct drm_i915_private *dev_priv = to_i915(dev);
9645 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9646 	enum plane_id plane_id = plane->id;
9647 	enum pipe pipe;
9648 	u32 val, base, offset, stride_mult, tiling, alpha;
9649 	int fourcc, pixel_format;
9650 	unsigned int aligned_height;
9651 	struct drm_framebuffer *fb;
9652 	struct intel_framebuffer *intel_fb;
9653 
9654 	if (!plane->get_hw_state(plane, &pipe))
9655 		return;
9656 
9657 	WARN_ON(pipe != crtc->pipe);
9658 
9659 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9660 	if (!intel_fb) {
9661 		DRM_DEBUG_KMS("failed to alloc fb\n");
9662 		return;
9663 	}
9664 
9665 	fb = &intel_fb->base;
9666 
9667 	fb->dev = dev;
9668 
9669 	val = I915_READ(PLANE_CTL(pipe, plane_id));
9670 
9671 	if (INTEL_GEN(dev_priv) >= 11)
9672 		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9673 	else
9674 		pixel_format = val & PLANE_CTL_FORMAT_MASK;
9675 
9676 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
9677 		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
9678 		alpha &= PLANE_COLOR_ALPHA_MASK;
9679 	} else {
9680 		alpha = val & PLANE_CTL_ALPHA_MASK;
9681 	}
9682 
9683 	fourcc = skl_format_to_fourcc(pixel_format,
9684 				      val & PLANE_CTL_ORDER_RGBX, alpha);
9685 	fb->format = drm_format_info(fourcc);
9686 
9687 	tiling = val & PLANE_CTL_TILED_MASK;
9688 	switch (tiling) {
9689 	case PLANE_CTL_TILED_LINEAR:
9690 		fb->modifier = DRM_FORMAT_MOD_LINEAR;
9691 		break;
9692 	case PLANE_CTL_TILED_X:
9693 		plane_config->tiling = I915_TILING_X;
9694 		fb->modifier = I915_FORMAT_MOD_X_TILED;
9695 		break;
9696 	case PLANE_CTL_TILED_Y:
9697 		plane_config->tiling = I915_TILING_Y;
9698 		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9699 			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9700 		else
9701 			fb->modifier = I915_FORMAT_MOD_Y_TILED;
9702 		break;
9703 	case PLANE_CTL_TILED_YF:
9704 		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
9705 			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9706 		else
9707 			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
9708 		break;
9709 	default:
9710 		MISSING_CASE(tiling);
9711 		goto error;
9712 	}
9713 
9714 	/*
9715 	 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr
9716 	 * while i915 HW rotation is clockwise; that's why the values are swapped.
9717 	 */
9718 	switch (val & PLANE_CTL_ROTATE_MASK) {
9719 	case PLANE_CTL_ROTATE_0:
9720 		plane_config->rotation = DRM_MODE_ROTATE_0;
9721 		break;
9722 	case PLANE_CTL_ROTATE_90:
9723 		plane_config->rotation = DRM_MODE_ROTATE_270;
9724 		break;
9725 	case PLANE_CTL_ROTATE_180:
9726 		plane_config->rotation = DRM_MODE_ROTATE_180;
9727 		break;
9728 	case PLANE_CTL_ROTATE_270:
9729 		plane_config->rotation = DRM_MODE_ROTATE_90;
9730 		break;
9731 	}
9732 
9733 	if (INTEL_GEN(dev_priv) >= 10 &&
9734 	    val & PLANE_CTL_FLIP_HORIZONTAL)
9735 		plane_config->rotation |= DRM_MODE_REFLECT_X;
9736 
9737 	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
9738 	plane_config->base = base;
9739 
9740 	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
9741 
9742 	val = I915_READ(PLANE_SIZE(pipe, plane_id));
9743 	fb->height = ((val >> 16) & 0xfff) + 1;
9744 	fb->width = ((val >> 0) & 0x1fff) + 1;
9745 
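	/*
	 * The stride register counts in units of stride_mult bytes (tile
	 * geometry dependent, see skl_plane_stride_mult()), hence the
	 * multiply to get a byte pitch.
	 */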
9746 	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
9747 	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
9748 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9749 
9750 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
9751 
9752 	plane_config->size = fb->pitches[0] * aligned_height;
9753 
9754 	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9755 		      crtc->base.name, plane->base.name, fb->width, fb->height,
9756 		      fb->format->cpp[0] * 8, base, fb->pitches[0],
9757 		      plane_config->size);
9758 
9759 	plane_config->fb = intel_fb;
9760 	return;
9761 
9762 error:
9763 	kfree(intel_fb);
9764 }
9765 
9766 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9767 				     struct intel_crtc_state *pipe_config)
9768 {
9769 	struct drm_device *dev = crtc->base.dev;
9770 	struct drm_i915_private *dev_priv = to_i915(dev);
9771 	u32 tmp;
9772 
9773 	tmp = I915_READ(PF_CTL(crtc->pipe));
9774 
9775 	if (tmp & PF_ENABLE) {
9776 		pipe_config->pch_pfit.enabled = true;
9777 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9778 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9779 
9780 		/* We currently do not free assignments of panel fitters on
9781 		 * ivb/hsw (since we don't use the higher upscaling modes which
9782 		 * differentiate them), so just WARN about this case for now. */
9783 		if (IS_GEN(dev_priv, 7)) {
9784 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9785 				PF_PIPE_SEL_IVB(crtc->pipe));
9786 		}
9787 	}
9788 }
9789 
9790 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9791 				     struct intel_crtc_state *pipe_config)
9792 {
9793 	struct drm_device *dev = crtc->base.dev;
9794 	struct drm_i915_private *dev_priv = to_i915(dev);
9795 	enum intel_display_power_domain power_domain;
9796 	intel_wakeref_t wakeref;
9797 	u32 tmp;
9798 	bool ret;
9799 
9800 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9801 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9802 	if (!wakeref)
9803 		return false;
9804 
9805 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9806 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9807 	pipe_config->shared_dpll = NULL;
9808 
9809 	ret = false;
9810 	tmp = I915_READ(PIPECONF(crtc->pipe));
9811 	if (!(tmp & PIPECONF_ENABLE))
9812 		goto out;
9813 
9814 	switch (tmp & PIPECONF_BPC_MASK) {
9815 	case PIPECONF_6BPC:
9816 		pipe_config->pipe_bpp = 18;
9817 		break;
9818 	case PIPECONF_8BPC:
9819 		pipe_config->pipe_bpp = 24;
9820 		break;
9821 	case PIPECONF_10BPC:
9822 		pipe_config->pipe_bpp = 30;
9823 		break;
9824 	case PIPECONF_12BPC:
9825 		pipe_config->pipe_bpp = 36;
9826 		break;
9827 	default:
9828 		break;
9829 	}
9830 
9831 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9832 		pipe_config->limited_color_range = true;
9833 
9834 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9835 		PIPECONF_GAMMA_MODE_SHIFT;
9836 
9837 	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9838 
9839 	i9xx_get_pipe_color_config(pipe_config);
9840 	intel_color_get_config(pipe_config);
9841 
9842 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9843 		struct intel_shared_dpll *pll;
9844 		enum intel_dpll_id pll_id;
9845 
9846 		pipe_config->has_pch_encoder = true;
9847 
9848 		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9849 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9850 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9851 
9852 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9853 
9854 		if (HAS_PCH_IBX(dev_priv)) {
9855 			/*
9856 			 * The pipe->pch transcoder and pch transcoder->pll
9857 			 * mapping is fixed.
9858 			 */
9859 			pll_id = (enum intel_dpll_id) crtc->pipe;
9860 		} else {
9861 			tmp = I915_READ(PCH_DPLL_SEL);
9862 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9863 				pll_id = DPLL_ID_PCH_PLL_B;
9864 			else
9865 				pll_id = DPLL_ID_PCH_PLL_A;
9866 		}
9867 
9868 		pipe_config->shared_dpll =
9869 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
9870 		pll = pipe_config->shared_dpll;
9871 
9872 		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9873 						&pipe_config->dpll_hw_state));
9874 
9875 		tmp = pipe_config->dpll_hw_state.dpll;
9876 		pipe_config->pixel_multiplier =
9877 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9878 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9879 
9880 		ironlake_pch_clock_get(crtc, pipe_config);
9881 	} else {
9882 		pipe_config->pixel_multiplier = 1;
9883 	}
9884 
9885 	intel_get_pipe_timings(crtc, pipe_config);
9886 	intel_get_pipe_src_size(crtc, pipe_config);
9887 
9888 	ironlake_get_pfit_config(crtc, pipe_config);
9889 
9890 	ret = true;
9891 
9892 out:
9893 	intel_display_power_put(dev_priv, power_domain, wakeref);
9894 
9895 	return ret;
9896 }

9897 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9898 				      struct intel_crtc_state *crtc_state)
9899 {
9900 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9901 	struct intel_atomic_state *state =
9902 		to_intel_atomic_state(crtc_state->base.state);
9903 
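	/*
	 * Pre-gen11 DSI has its own PLL handling; everything else, and all
	 * of gen11+, gets a shared DPLL here.
	 */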
9904 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9905 	    INTEL_GEN(dev_priv) >= 11) {
9906 		struct intel_encoder *encoder =
9907 			intel_get_crtc_new_encoder(state, crtc_state);
9908 
9909 		if (!intel_get_shared_dpll(crtc_state, encoder)) {
9910 			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9911 				      pipe_name(crtc->pipe));
9912 			return -EINVAL;
9913 		}
9914 	}
9915 
9916 	return 0;
9917 }
9918 
9919 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9920 				   enum port port,
9921 				   struct intel_crtc_state *pipe_config)
9922 {
9923 	enum intel_dpll_id id;
9924 	u32 temp;
9925 
9926 	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9927 	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9928 
9929 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9930 		return;
9931 
9932 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9933 }
9934 
9935 static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9936 				enum port port,
9937 				struct intel_crtc_state *pipe_config)
9938 {
9939 	enum intel_dpll_id id;
9940 	u32 temp;
9941 
9942 	/* TODO: TBT pll not implemented. */
9943 	if (intel_port_is_combophy(dev_priv, port)) {
9944 		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9945 		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9946 		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9947 	} else if (intel_port_is_tc(dev_priv, port)) {
9948 		id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
9949 	} else {
9950 		WARN(1, "Invalid port %x\n", port);
9951 		return;
9952 	}
9953 
9954 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9955 }
9956 
9957 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9958 				enum port port,
9959 				struct intel_crtc_state *pipe_config)
9960 {
9961 	enum intel_dpll_id id;
9962 
9963 	switch (port) {
9964 	case PORT_A:
9965 		id = DPLL_ID_SKL_DPLL0;
9966 		break;
9967 	case PORT_B:
9968 		id = DPLL_ID_SKL_DPLL1;
9969 		break;
9970 	case PORT_C:
9971 		id = DPLL_ID_SKL_DPLL2;
9972 		break;
9973 	default:
9974 		DRM_ERROR("Incorrect port type\n");
9975 		return;
9976 	}
9977 
9978 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9979 }
9980 
9981 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9982 				enum port port,
9983 				struct intel_crtc_state *pipe_config)
9984 {
9985 	enum intel_dpll_id id;
9986 	u32 temp;
9987 
9988 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9989 	id = temp >> (port * 3 + 1);
9990 
9991 	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
9992 		return;
9993 
9994 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9995 }
9996 
9997 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9998 				enum port port,
9999 				struct intel_crtc_state *pipe_config)
10000 {
10001 	enum intel_dpll_id id;
10002 	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
10003 
10004 	switch (ddi_pll_sel) {
10005 	case PORT_CLK_SEL_WRPLL1:
10006 		id = DPLL_ID_WRPLL1;
10007 		break;
10008 	case PORT_CLK_SEL_WRPLL2:
10009 		id = DPLL_ID_WRPLL2;
10010 		break;
10011 	case PORT_CLK_SEL_SPLL:
10012 		id = DPLL_ID_SPLL;
10013 		break;
10014 	case PORT_CLK_SEL_LCPLL_810:
10015 		id = DPLL_ID_LCPLL_810;
10016 		break;
10017 	case PORT_CLK_SEL_LCPLL_1350:
10018 		id = DPLL_ID_LCPLL_1350;
10019 		break;
10020 	case PORT_CLK_SEL_LCPLL_2700:
10021 		id = DPLL_ID_LCPLL_2700;
10022 		break;
10023 	default:
10024 		MISSING_CASE(ddi_pll_sel);
10025 		/* fall through */
10026 	case PORT_CLK_SEL_NONE:
10027 		return;
10028 	}
10029 
10030 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10031 }
10032 
10033 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
10034 				     struct intel_crtc_state *pipe_config,
10035 				     u64 *power_domain_mask,
10036 				     intel_wakeref_t *wakerefs)
10037 {
10038 	struct drm_device *dev = crtc->base.dev;
10039 	struct drm_i915_private *dev_priv = to_i915(dev);
10040 	enum intel_display_power_domain power_domain;
10041 	unsigned long panel_transcoder_mask = 0;
10042 	unsigned long enabled_panel_transcoders = 0;
10043 	enum transcoder panel_transcoder;
10044 	intel_wakeref_t wf;
10045 	u32 tmp;
10046 
10047 	if (INTEL_GEN(dev_priv) >= 11)
10048 		panel_transcoder_mask |=
10049 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
10050 
10051 	if (HAS_TRANSCODER_EDP(dev_priv))
10052 		panel_transcoder_mask |= BIT(TRANSCODER_EDP);
10053 
10054 	/*
10055 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
10056 	 * and DSI transcoders handled below.
10057 	 */
10058 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10059 
10060 	/*
10061 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
10062 	 * consistency and less surprising code; it's in the always-on power well).
10063 	 */
10064 	for_each_set_bit(panel_transcoder,
10065 			 &panel_transcoder_mask,
10066 			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
10067 		bool force_thru = false;
10068 		enum pipe trans_pipe;
10069 
10070 		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
10071 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
10072 			continue;
10073 
10074 		/*
10075 		 * Log all enabled ones, only use the first one.
10076 		 *
10077 		 * FIXME: This won't work for two separate DSI displays.
10078 		 */
10079 		enabled_panel_transcoders |= BIT(panel_transcoder);
10080 		if (enabled_panel_transcoders != BIT(panel_transcoder))
10081 			continue;
10082 
10083 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10084 		default:
10085 			WARN(1, "unknown pipe linked to transcoder %s\n",
10086 			     transcoder_name(panel_transcoder));
10087 			/* fall through */
10088 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
10089 			force_thru = true;
10090 			/* fall through */
10091 		case TRANS_DDI_EDP_INPUT_A_ON:
10092 			trans_pipe = PIPE_A;
10093 			break;
10094 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
10095 			trans_pipe = PIPE_B;
10096 			break;
10097 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
10098 			trans_pipe = PIPE_C;
10099 			break;
10100 		}
10101 
10102 		if (trans_pipe == crtc->pipe) {
10103 			pipe_config->cpu_transcoder = panel_transcoder;
10104 			pipe_config->pch_pfit.force_thru = force_thru;
10105 		}
10106 	}
10107 
10108 	/*
10109 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10110 	 */
10111 	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10112 		enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10113 
10114 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10115 	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10116 
10117 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10118 	if (!wf)
10119 		return false;
10120 
10121 	wakerefs[power_domain] = wf;
10122 	*power_domain_mask |= BIT_ULL(power_domain);
10123 
10124 	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10125 
10126 	return tmp & PIPECONF_ENABLE;
10127 }
10128 
10129 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10130 					 struct intel_crtc_state *pipe_config,
10131 					 u64 *power_domain_mask,
10132 					 intel_wakeref_t *wakerefs)
10133 {
10134 	struct drm_device *dev = crtc->base.dev;
10135 	struct drm_i915_private *dev_priv = to_i915(dev);
10136 	enum intel_display_power_domain power_domain;
10137 	enum transcoder cpu_transcoder;
10138 	intel_wakeref_t wf;
10139 	enum port port;
10140 	u32 tmp;
10141 
10142 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10143 		if (port == PORT_A)
10144 			cpu_transcoder = TRANSCODER_DSI_A;
10145 		else
10146 			cpu_transcoder = TRANSCODER_DSI_C;
10147 
10148 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10149 		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10150 
10151 		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10152 		if (!wf)
10153 			continue;
10154 
10155 		wakerefs[power_domain] = wf;
10156 		*power_domain_mask |= BIT_ULL(power_domain);
10157 
10158 		/*
10159 		 * The PLL needs to be enabled with a valid divider
10160 		 * configuration, otherwise accessing DSI registers will hang
10161 		 * the machine. See BSpec North Display Engine
10162 		 * registers/MIPI[BXT]. We can break out here early, since we
10163 		 * need the same DSI PLL to be enabled for both DSI ports.
10164 		 */
10165 		if (!bxt_dsi_pll_is_enabled(dev_priv))
10166 			break;
10167 
10168 		/* XXX: this works for video mode only */
10169 		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10170 		if (!(tmp & DPI_ENABLE))
10171 			continue;
10172 
10173 		tmp = I915_READ(MIPI_CTRL(port));
10174 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10175 			continue;
10176 
10177 		pipe_config->cpu_transcoder = cpu_transcoder;
10178 		break;
10179 	}
10180 
10181 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
10182 }
10183 
10184 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10185 				       struct intel_crtc_state *pipe_config)
10186 {
10187 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10188 	struct intel_shared_dpll *pll;
10189 	enum port port;
10190 	u32 tmp;
10191 
10192 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10193 
10194 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
10195 
10196 	if (INTEL_GEN(dev_priv) >= 11)
10197 		icelake_get_ddi_pll(dev_priv, port, pipe_config);
10198 	else if (IS_CANNONLAKE(dev_priv))
10199 		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10200 	else if (IS_GEN9_BC(dev_priv))
10201 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
10202 	else if (IS_GEN9_LP(dev_priv))
10203 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
10204 	else
10205 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
10206 
10207 	pll = pipe_config->shared_dpll;
10208 	if (pll) {
10209 		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10210 						&pipe_config->dpll_hw_state));
10211 	}
10212 
10213 	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
	 * So just check whether this pipe is wired to DDI E and whether the
	 * PCH transcoder is on.
10217 	 */
10218 	if (INTEL_GEN(dev_priv) < 9 &&
10219 	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10220 		pipe_config->has_pch_encoder = true;
10221 
10222 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10223 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10224 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10225 
10226 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
10227 	}
10228 }
10229 
10230 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10231 				    struct intel_crtc_state *pipe_config)
10232 {
10233 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10234 	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
10235 	enum intel_display_power_domain power_domain;
10236 	u64 power_domain_mask;
10237 	bool active;
10238 
10239 	intel_crtc_init_scalers(crtc, pipe_config);
10240 
10241 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10242 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10243 	if (!wf)
10244 		return false;
10245 
10246 	wakerefs[power_domain] = wf;
10247 	power_domain_mask = BIT_ULL(power_domain);
10248 
10249 	pipe_config->shared_dpll = NULL;
10250 
10251 	active = hsw_get_transcoder_state(crtc, pipe_config,
10252 					  &power_domain_mask, wakerefs);
10253 
10254 	if (IS_GEN9_LP(dev_priv) &&
10255 	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
10256 					 &power_domain_mask, wakerefs)) {
10257 		WARN_ON(active);
10258 		active = true;
10259 	}
10260 
10261 	if (!active)
10262 		goto out;
10263 
10264 	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
10265 	    INTEL_GEN(dev_priv) >= 11) {
10266 		haswell_get_ddi_port_state(crtc, pipe_config);
10267 		intel_get_pipe_timings(crtc, pipe_config);
10268 	}
10269 
10270 	intel_get_pipe_src_size(crtc, pipe_config);
10271 	intel_get_crtc_ycbcr_config(crtc, pipe_config);
10272 
10273 	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
10274 
10275 	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10276 
10277 	if (INTEL_GEN(dev_priv) >= 9) {
10278 		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10279 
10280 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10281 			pipe_config->gamma_enable = true;
10282 
10283 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10284 			pipe_config->csc_enable = true;
10285 	} else {
10286 		i9xx_get_pipe_color_config(pipe_config);
10287 	}
10288 
10289 	intel_color_get_config(pipe_config);
10290 
10291 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10292 	WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10293 
10294 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10295 	if (wf) {
10296 		wakerefs[power_domain] = wf;
10297 		power_domain_mask |= BIT_ULL(power_domain);
10298 
10299 		if (INTEL_GEN(dev_priv) >= 9)
10300 			skylake_get_pfit_config(crtc, pipe_config);
10301 		else
10302 			ironlake_get_pfit_config(crtc, pipe_config);
10303 	}
10304 
10305 	if (hsw_crtc_supports_ips(crtc)) {
10306 		if (IS_HASWELL(dev_priv))
10307 			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10308 		else {
10309 			/*
			 * We cannot read out the IPS state on Broadwell; set
			 * it to true so that it gets set to a defined state
			 * on the first commit.
10313 			 */
10314 			pipe_config->ips_enabled = true;
10315 		}
10316 	}
10317 
10318 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10319 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10320 		pipe_config->pixel_multiplier =
10321 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10322 	} else {
10323 		pipe_config->pixel_multiplier = 1;
10324 	}
10325 
10326 out:
10327 	for_each_power_domain(power_domain, power_domain_mask)
10328 		intel_display_power_put(dev_priv,
10329 					power_domain, wakerefs[power_domain]);
10330 
10331 	return active;
10332 }
10333 
10334 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
10335 {
10336 	struct drm_i915_private *dev_priv =
10337 		to_i915(plane_state->base.plane->dev);
10338 	const struct drm_framebuffer *fb = plane_state->base.fb;
10339 	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10340 	u32 base;
10341 
10342 	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
10343 		base = obj->phys_handle->busaddr;
10344 	else
10345 		base = intel_plane_ggtt_offset(plane_state);
10346 
10347 	base += plane_state->color_plane[0].offset;
10348 
	/*
	 * ILK+ handle 180 degree rotation automatically; on GMCH platforms
	 * we have to point the base at the last pixel ourselves (below).
	 */
10350 	if (HAS_GMCH(dev_priv) &&
10351 	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
10352 		base += (plane_state->base.crtc_h *
10353 			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10354 
10355 	return base;
10356 }
10357 
10358 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10359 {
10360 	int x = plane_state->base.crtc_x;
10361 	int y = plane_state->base.crtc_y;
10362 	u32 pos = 0;
10363 
10364 	if (x < 0) {
10365 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10366 		x = -x;
10367 	}
10368 	pos |= x << CURSOR_X_SHIFT;
10369 
10370 	if (y < 0) {
10371 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10372 		y = -y;
10373 	}
10374 	pos |= y << CURSOR_Y_SHIFT;
10375 
10376 	return pos;
10377 }
10378 
10379 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10380 {
10381 	const struct drm_mode_config *config =
10382 		&plane_state->base.plane->dev->mode_config;
10383 	int width = plane_state->base.crtc_w;
10384 	int height = plane_state->base.crtc_h;
10385 
10386 	return width > 0 && width <= config->cursor_width &&
10387 		height > 0 && height <= config->cursor_height;
10388 }
10389 
10390 static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
10391 {
10392 	int src_x, src_y;
10393 	u32 offset;
10394 	int ret;
10395 
10396 	ret = intel_plane_compute_gtt(plane_state);
10397 	if (ret)
10398 		return ret;
10399 
10400 	if (!plane_state->base.visible)
10401 		return 0;
10402 
10403 	src_x = plane_state->base.src_x >> 16;
10404 	src_y = plane_state->base.src_y >> 16;
10405 
10406 	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10407 	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10408 						    plane_state, 0);
10409 
10410 	if (src_x != 0 || src_y != 0) {
10411 		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10412 		return -EINVAL;
10413 	}
10414 
10415 	plane_state->color_plane[0].offset = offset;
10416 
10417 	return 0;
10418 }
10419 
10420 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10421 			      struct intel_plane_state *plane_state)
10422 {
10423 	const struct drm_framebuffer *fb = plane_state->base.fb;
10424 	int ret;
10425 
10426 	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10427 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
10428 		return -EINVAL;
10429 	}
10430 
10431 	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10432 						  &crtc_state->base,
10433 						  DRM_PLANE_HELPER_NO_SCALING,
10434 						  DRM_PLANE_HELPER_NO_SCALING,
10435 						  true, true);
10436 	if (ret)
10437 		return ret;
10438 
10439 	ret = intel_cursor_check_surface(plane_state);
10440 	if (ret)
10441 		return ret;
10442 
10443 	if (!plane_state->base.visible)
10444 		return 0;
10445 
10446 	ret = intel_plane_check_src_coordinates(plane_state);
10447 	if (ret)
10448 		return ret;
10449 
10450 	return 0;
10451 }
10452 
10453 static unsigned int
10454 i845_cursor_max_stride(struct intel_plane *plane,
10455 		       u32 pixel_format, u64 modifier,
10456 		       unsigned int rotation)
10457 {
10458 	return 2048;
10459 }
10460 
10461 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10462 {
10463 	u32 cntl = 0;
10464 
10465 	if (crtc_state->gamma_enable)
10466 		cntl |= CURSOR_GAMMA_ENABLE;
10467 
10468 	return cntl;
10469 }
10470 
10471 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10472 			   const struct intel_plane_state *plane_state)
10473 {
10474 	return CURSOR_ENABLE |
10475 		CURSOR_FORMAT_ARGB |
10476 		CURSOR_STRIDE(plane_state->color_plane[0].stride);
10477 }
10478 
10479 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10480 {
10481 	int width = plane_state->base.crtc_w;
10482 
10483 	/*
10484 	 * 845g/865g are only limited by the width of their cursors,
10485 	 * the height is arbitrary up to the precision of the register.
10486 	 */
10487 	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10488 }
10489 
10490 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
10491 			     struct intel_plane_state *plane_state)
10492 {
10493 	const struct drm_framebuffer *fb = plane_state->base.fb;
10494 	int ret;
10495 
10496 	ret = intel_check_cursor(crtc_state, plane_state);
10497 	if (ret)
10498 		return ret;
10499 
10500 	/* if we want to turn off the cursor ignore width and height */
10501 	if (!fb)
10502 		return 0;
10503 
10504 	/* Check for which cursor types we support */
10505 	if (!i845_cursor_size_ok(plane_state)) {
10506 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10507 			  plane_state->base.crtc_w,
10508 			  plane_state->base.crtc_h);
10509 		return -EINVAL;
10510 	}
10511 
10512 	WARN_ON(plane_state->base.visible &&
10513 		plane_state->color_plane[0].stride != fb->pitches[0]);
10514 
10515 	switch (fb->pitches[0]) {
10516 	case 256:
10517 	case 512:
10518 	case 1024:
10519 	case 2048:
10520 		break;
10521 	default:
10522 		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10523 			      fb->pitches[0]);
10524 		return -EINVAL;
10525 	}
10526 
10527 	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10528 
10529 	return 0;
10530 }
10531 
10532 static void i845_update_cursor(struct intel_plane *plane,
10533 			       const struct intel_crtc_state *crtc_state,
10534 			       const struct intel_plane_state *plane_state)
10535 {
10536 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10537 	u32 cntl = 0, base = 0, pos = 0, size = 0;
10538 	unsigned long irqflags;
10539 
10540 	if (plane_state && plane_state->base.visible) {
10541 		unsigned int width = plane_state->base.crtc_w;
10542 		unsigned int height = plane_state->base.crtc_h;
10543 
10544 		cntl = plane_state->ctl |
10545 			i845_cursor_ctl_crtc(crtc_state);
10546 
		/* CURSIZE: height in bits 12 and up, width in the low bits */
		size = (height << 12) | width;
10548 
10549 		base = intel_cursor_base(plane_state);
10550 		pos = intel_cursor_position(plane_state);
10551 	}
10552 
10553 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10554 
	/*
	 * On these chipsets we can only modify the base/size/stride
	 * while the cursor is disabled.
	 */
10558 	if (plane->cursor.base != base ||
10559 	    plane->cursor.size != size ||
10560 	    plane->cursor.cntl != cntl) {
10561 		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
10562 		I915_WRITE_FW(CURBASE(PIPE_A), base);
10563 		I915_WRITE_FW(CURSIZE, size);
10564 		I915_WRITE_FW(CURPOS(PIPE_A), pos);
10565 		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
10566 
10567 		plane->cursor.base = base;
10568 		plane->cursor.size = size;
10569 		plane->cursor.cntl = cntl;
10570 	} else {
10571 		I915_WRITE_FW(CURPOS(PIPE_A), pos);
10572 	}
10573 
10574 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10575 }
10576 
10577 static void i845_disable_cursor(struct intel_plane *plane,
10578 				const struct intel_crtc_state *crtc_state)
10579 {
10580 	i845_update_cursor(plane, crtc_state, NULL);
10581 }
10582 
10583 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10584 				     enum pipe *pipe)
10585 {
10586 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10587 	enum intel_display_power_domain power_domain;
10588 	intel_wakeref_t wakeref;
10589 	bool ret;
10590 
10591 	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
10592 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10593 	if (!wakeref)
10594 		return false;
10595 
10596 	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10597 
10598 	*pipe = PIPE_A;
10599 
10600 	intel_display_power_put(dev_priv, power_domain, wakeref);
10601 
10602 	return ret;
10603 }
10604 
10605 static unsigned int
10606 i9xx_cursor_max_stride(struct intel_plane *plane,
10607 		       u32 pixel_format, u64 modifier,
10608 		       unsigned int rotation)
10609 {
10610 	return plane->base.dev->mode_config.cursor_width * 4;
10611 }
10612 
10613 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10614 {
10615 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10616 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10617 	u32 cntl = 0;
10618 
10619 	if (INTEL_GEN(dev_priv) >= 11)
10620 		return cntl;
10621 
10622 	if (crtc_state->gamma_enable)
10623 		cntl = MCURSOR_GAMMA_ENABLE;
10624 
10625 	if (crtc_state->csc_enable)
10626 		cntl |= MCURSOR_PIPE_CSC_ENABLE;
10627 
10628 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10629 		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10630 
10631 	return cntl;
10632 }
10633 
10634 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10635 			   const struct intel_plane_state *plane_state)
10636 {
10637 	struct drm_i915_private *dev_priv =
10638 		to_i915(plane_state->base.plane->dev);
10639 	u32 cntl = 0;
10640 
10641 	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10642 		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10643 
10644 	switch (plane_state->base.crtc_w) {
10645 	case 64:
10646 		cntl |= MCURSOR_MODE_64_ARGB_AX;
10647 		break;
10648 	case 128:
10649 		cntl |= MCURSOR_MODE_128_ARGB_AX;
10650 		break;
10651 	case 256:
10652 		cntl |= MCURSOR_MODE_256_ARGB_AX;
10653 		break;
10654 	default:
10655 		MISSING_CASE(plane_state->base.crtc_w);
10656 		return 0;
10657 	}
10658 
10659 	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
10660 		cntl |= MCURSOR_ROTATE_180;
10661 
10662 	return cntl;
10663 }
10664 
10665 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
10666 {
10667 	struct drm_i915_private *dev_priv =
10668 		to_i915(plane_state->base.plane->dev);
10669 	int width = plane_state->base.crtc_w;
10670 	int height = plane_state->base.crtc_h;
10671 
10672 	if (!intel_cursor_size_ok(plane_state))
10673 		return false;
10674 
10675 	/* Cursor width is limited to a few power-of-two sizes */
10676 	switch (width) {
10677 	case 256:
10678 	case 128:
10679 	case 64:
10680 		break;
10681 	default:
10682 		return false;
10683 	}
10684 
10685 	/*
10686 	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10687 	 * height from 8 lines up to the cursor width, when the
10688 	 * cursor is not rotated. Everything else requires square
10689 	 * cursors.
10690 	 */
10691 	if (HAS_CUR_FBC(dev_priv) &&
10692 	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
10693 		if (height < 8 || height > width)
10694 			return false;
10695 	} else {
10696 		if (height != width)
10697 			return false;
10698 	}
10699 
10700 	return true;
10701 }
10702 
10703 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
10704 			     struct intel_plane_state *plane_state)
10705 {
10706 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
10707 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10708 	const struct drm_framebuffer *fb = plane_state->base.fb;
10709 	enum pipe pipe = plane->pipe;
10710 	int ret;
10711 
10712 	ret = intel_check_cursor(crtc_state, plane_state);
10713 	if (ret)
10714 		return ret;
10715 
10716 	/* if we want to turn off the cursor ignore width and height */
10717 	if (!fb)
10718 		return 0;
10719 
10720 	/* Check for which cursor types we support */
10721 	if (!i9xx_cursor_size_ok(plane_state)) {
10722 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10723 			  plane_state->base.crtc_w,
10724 			  plane_state->base.crtc_h);
10725 		return -EINVAL;
10726 	}
10727 
10728 	WARN_ON(plane_state->base.visible &&
10729 		plane_state->color_plane[0].stride != fb->pitches[0]);
10730 
10731 	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10732 		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10733 			      fb->pitches[0], plane_state->base.crtc_w);
10734 		return -EINVAL;
10735 	}
10736 
10737 	/*
10738 	 * There's something wrong with the cursor on CHV pipe C.
10739 	 * If it straddles the left edge of the screen then
10740 	 * moving it away from the edge or disabling it often
10741 	 * results in a pipe underrun, and often that can lead to
10742 	 * dead pipe (constant underrun reported, and it scans
10743 	 * out just a solid color). To recover from that, the
10744 	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
10746 	 */
10747 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10748 	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
10749 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10750 		return -EINVAL;
10751 	}
10752 
10753 	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10754 
10755 	return 0;
10756 }
10757 
10758 static void i9xx_update_cursor(struct intel_plane *plane,
10759 			       const struct intel_crtc_state *crtc_state,
10760 			       const struct intel_plane_state *plane_state)
10761 {
10762 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10763 	enum pipe pipe = plane->pipe;
10764 	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
10765 	unsigned long irqflags;
10766 
10767 	if (plane_state && plane_state->base.visible) {
10768 		cntl = plane_state->ctl |
10769 			i9xx_cursor_ctl_crtc(crtc_state);
10770 
10771 		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10772 			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10773 
10774 		base = intel_cursor_base(plane_state);
10775 		pos = intel_cursor_position(plane_state);
10776 	}
10777 
10778 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10779 
10780 	/*
10781 	 * On some platforms writing CURCNTR first will also
10782 	 * cause CURPOS to be armed by the CURBASE write.
10783 	 * Without the CURCNTR write the CURPOS write would
10784 	 * arm itself. Thus we always update CURCNTR before
10785 	 * CURPOS.
10786 	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor registers will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
10796 	 * except when the plane is getting enabled at which time
10797 	 * the CURCNTR write arms the update.
10798 	 */
10799 
10800 	if (INTEL_GEN(dev_priv) >= 9)
10801 		skl_write_cursor_wm(plane, crtc_state);
10802 
10803 	if (plane->cursor.base != base ||
10804 	    plane->cursor.size != fbc_ctl ||
10805 	    plane->cursor.cntl != cntl) {
10806 		if (HAS_CUR_FBC(dev_priv))
10807 			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
10808 		I915_WRITE_FW(CURCNTR(pipe), cntl);
10809 		I915_WRITE_FW(CURPOS(pipe), pos);
10810 		I915_WRITE_FW(CURBASE(pipe), base);
10811 
10812 		plane->cursor.base = base;
10813 		plane->cursor.size = fbc_ctl;
10814 		plane->cursor.cntl = cntl;
10815 	} else {
10816 		I915_WRITE_FW(CURPOS(pipe), pos);
10817 		I915_WRITE_FW(CURBASE(pipe), base);
10818 	}
10819 
10820 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10821 }
10822 
10823 static void i9xx_disable_cursor(struct intel_plane *plane,
10824 				const struct intel_crtc_state *crtc_state)
10825 {
10826 	i9xx_update_cursor(plane, crtc_state, NULL);
10827 }
10828 
10829 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10830 				     enum pipe *pipe)
10831 {
10832 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10833 	enum intel_display_power_domain power_domain;
10834 	intel_wakeref_t wakeref;
10835 	bool ret;
10836 	u32 val;
10837 
10838 	/*
10839 	 * Not 100% correct for planes that can move between pipes,
10840 	 * but that's only the case for gen2-3 which don't have any
10841 	 * display power wells.
10842 	 */
10843 	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10844 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10845 	if (!wakeref)
10846 		return false;
10847 
10848 	val = I915_READ(CURCNTR(plane->pipe));
10849 
10850 	ret = val & MCURSOR_MODE;
10851 
10852 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10853 		*pipe = plane->pipe;
10854 	else
10855 		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10856 			MCURSOR_PIPE_SELECT_SHIFT;
10857 
10858 	intel_display_power_put(dev_priv, power_domain, wakeref);
10859 
10860 	return ret;
10861 }
10862 
10863 /* VESA 640x480x72Hz mode to set on the pipe */
10864 static const struct drm_display_mode load_detect_mode = {
10865 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10866 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10867 };
10868 
10869 struct drm_framebuffer *
10870 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10871 			 struct drm_mode_fb_cmd2 *mode_cmd)
10872 {
10873 	struct intel_framebuffer *intel_fb;
10874 	int ret;
10875 
10876 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10877 	if (!intel_fb)
10878 		return ERR_PTR(-ENOMEM);
10879 
10880 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10881 	if (ret)
10882 		goto err;
10883 
10884 	return &intel_fb->base;
10885 
10886 err:
10887 	kfree(intel_fb);
10888 	return ERR_PTR(ret);
10889 }
10890 
10891 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10892 					struct drm_crtc *crtc)
10893 {
10894 	struct drm_plane *plane;
10895 	struct drm_plane_state *plane_state;
10896 	int ret, i;
10897 
10898 	ret = drm_atomic_add_affected_planes(state, crtc);
10899 	if (ret)
10900 		return ret;
10901 
10902 	for_each_new_plane_in_state(state, plane, plane_state, i) {
10903 		if (plane_state->crtc != crtc)
10904 			continue;
10905 
10906 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10907 		if (ret)
10908 			return ret;
10909 
10910 		drm_atomic_set_fb_for_plane(plane_state, NULL);
10911 	}
10912 
10913 	return 0;
10914 }
10915 
10916 int intel_get_load_detect_pipe(struct drm_connector *connector,
10917 			       const struct drm_display_mode *mode,
10918 			       struct intel_load_detect_pipe *old,
10919 			       struct drm_modeset_acquire_ctx *ctx)
10920 {
10921 	struct intel_crtc *intel_crtc;
10922 	struct intel_encoder *intel_encoder =
10923 		intel_attached_encoder(connector);
10924 	struct drm_crtc *possible_crtc;
10925 	struct drm_encoder *encoder = &intel_encoder->base;
10926 	struct drm_crtc *crtc = NULL;
10927 	struct drm_device *dev = encoder->dev;
10928 	struct drm_i915_private *dev_priv = to_i915(dev);
10929 	struct drm_mode_config *config = &dev->mode_config;
10930 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
10931 	struct drm_connector_state *connector_state;
10932 	struct intel_crtc_state *crtc_state;
10933 	int ret, i = -1;
10934 
10935 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10936 		      connector->base.id, connector->name,
10937 		      encoder->base.id, encoder->name);
10938 
10939 	old->restore_state = NULL;
10940 
10941 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
10942 
10943 	/*
10944 	 * Algorithm gets a little messy:
10945 	 *
10946 	 *   - if the connector already has an assigned crtc, use it (but make
10947 	 *     sure it's on first)
10948 	 *
10949 	 *   - try to find the first unused crtc that can drive this connector,
10950 	 *     and use that if we find one
10951 	 */
10952 
10953 	/* See if we already have a CRTC for this connector */
10954 	if (connector->state->crtc) {
10955 		crtc = connector->state->crtc;
10956 
10957 		ret = drm_modeset_lock(&crtc->mutex, ctx);
10958 		if (ret)
10959 			goto fail;
10960 
10961 		/* Make sure the crtc and connector are running */
10962 		goto found;
10963 	}
10964 
10965 	/* Find an unused one (if possible) */
10966 	for_each_crtc(dev, possible_crtc) {
10967 		i++;
10968 		if (!(encoder->possible_crtcs & (1 << i)))
10969 			continue;
10970 
10971 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10972 		if (ret)
10973 			goto fail;
10974 
10975 		if (possible_crtc->state->enable) {
10976 			drm_modeset_unlock(&possible_crtc->mutex);
10977 			continue;
10978 		}
10979 
10980 		crtc = possible_crtc;
10981 		break;
10982 	}
10983 
10984 	/*
10985 	 * If we didn't find an unused CRTC, don't use any.
10986 	 */
10987 	if (!crtc) {
10988 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10989 		ret = -ENODEV;
10990 		goto fail;
10991 	}
10992 
10993 found:
10994 	intel_crtc = to_intel_crtc(crtc);
10995 
10996 	state = drm_atomic_state_alloc(dev);
10997 	restore_state = drm_atomic_state_alloc(dev);
10998 	if (!state || !restore_state) {
10999 		ret = -ENOMEM;
11000 		goto fail;
11001 	}
11002 
11003 	state->acquire_ctx = ctx;
11004 	restore_state->acquire_ctx = ctx;
11005 
11006 	connector_state = drm_atomic_get_connector_state(state, connector);
11007 	if (IS_ERR(connector_state)) {
11008 		ret = PTR_ERR(connector_state);
11009 		goto fail;
11010 	}
11011 
11012 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
11013 	if (ret)
11014 		goto fail;
11015 
11016 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
11017 	if (IS_ERR(crtc_state)) {
11018 		ret = PTR_ERR(crtc_state);
11019 		goto fail;
11020 	}
11021 
11022 	crtc_state->base.active = crtc_state->base.enable = true;
11023 
11024 	if (!mode)
11025 		mode = &load_detect_mode;
11026 
11027 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
11028 	if (ret)
11029 		goto fail;
11030 
11031 	ret = intel_modeset_disable_planes(state, crtc);
11032 	if (ret)
11033 		goto fail;
11034 
11035 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
11036 	if (!ret)
11037 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
11038 	if (!ret)
11039 		ret = drm_atomic_add_affected_planes(restore_state, crtc);
11040 	if (ret) {
11041 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
11042 		goto fail;
11043 	}
11044 
11045 	ret = drm_atomic_commit(state);
11046 	if (ret) {
11047 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
11048 		goto fail;
11049 	}
11050 
11051 	old->restore_state = restore_state;
11052 	drm_atomic_state_put(state);
11053 
11054 	/* let the connector get through one full cycle before testing */
11055 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
11056 	return true;
11057 
11058 fail:
11059 	if (state) {
11060 		drm_atomic_state_put(state);
11061 		state = NULL;
11062 	}
11063 	if (restore_state) {
11064 		drm_atomic_state_put(restore_state);
11065 		restore_state = NULL;
11066 	}
11067 
11068 	if (ret == -EDEADLK)
11069 		return ret;
11070 
11071 	return false;
11072 }
11073 
11074 void intel_release_load_detect_pipe(struct drm_connector *connector,
11075 				    struct intel_load_detect_pipe *old,
11076 				    struct drm_modeset_acquire_ctx *ctx)
11077 {
11078 	struct intel_encoder *intel_encoder =
11079 		intel_attached_encoder(connector);
11080 	struct drm_encoder *encoder = &intel_encoder->base;
11081 	struct drm_atomic_state *state = old->restore_state;
11082 	int ret;
11083 
11084 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
11085 		      connector->base.id, connector->name,
11086 		      encoder->base.id, encoder->name);
11087 
11088 	if (!state)
11089 		return;
11090 
11091 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
11092 	if (ret)
11093 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
11094 	drm_atomic_state_put(state);
11095 }
11096 
11097 static int i9xx_pll_refclk(struct drm_device *dev,
11098 			   const struct intel_crtc_state *pipe_config)
11099 {
11100 	struct drm_i915_private *dev_priv = to_i915(dev);
11101 	u32 dpll = pipe_config->dpll_hw_state.dpll;
11102 
11103 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11104 		return dev_priv->vbt.lvds_ssc_freq;
11105 	else if (HAS_PCH_SPLIT(dev_priv))
11106 		return 120000;
11107 	else if (!IS_GEN(dev_priv, 2))
11108 		return 96000;
11109 	else
11110 		return 48000;
11111 }
11112 
11113 /* Returns the clock of the currently programmed mode of the given pipe. */
11114 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
11115 				struct intel_crtc_state *pipe_config)
11116 {
11117 	struct drm_device *dev = crtc->base.dev;
11118 	struct drm_i915_private *dev_priv = to_i915(dev);
11119 	int pipe = pipe_config->cpu_transcoder;
11120 	u32 dpll = pipe_config->dpll_hw_state.dpll;
11121 	u32 fp;
11122 	struct dpll clock;
11123 	int port_clock;
11124 	int refclk = i9xx_pll_refclk(dev, pipe_config);
11125 
11126 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
11127 		fp = pipe_config->dpll_hw_state.fp0;
11128 	else
11129 		fp = pipe_config->dpll_hw_state.fp1;
11130 
11131 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
11132 	if (IS_PINEVIEW(dev_priv)) {
11133 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11134 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
11135 	} else {
11136 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11137 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
11138 	}
11139 
11140 	if (!IS_GEN(dev_priv, 2)) {
11141 		if (IS_PINEVIEW(dev_priv))
11142 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11143 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
11144 		else
11145 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
11146 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
11147 
11148 		switch (dpll & DPLL_MODE_MASK) {
11149 		case DPLLB_MODE_DAC_SERIAL:
11150 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11151 				5 : 10;
11152 			break;
11153 		case DPLLB_MODE_LVDS:
11154 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11155 				7 : 14;
11156 			break;
11157 		default:
11158 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
11159 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
11160 			return;
11161 		}
11162 
11163 		if (IS_PINEVIEW(dev_priv))
11164 			port_clock = pnv_calc_dpll_params(refclk, &clock);
11165 		else
11166 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
11167 	} else {
11168 		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
11169 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
11170 
11171 		if (is_lvds) {
11172 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11173 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
11174 
11175 			if (lvds & LVDS_CLKB_POWER_UP)
11176 				clock.p2 = 7;
11177 			else
11178 				clock.p2 = 14;
11179 		} else {
11180 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
11181 				clock.p1 = 2;
11182 			else {
11183 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11184 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11185 			}
11186 			if (dpll & PLL_P2_DIVIDE_BY_4)
11187 				clock.p2 = 4;
11188 			else
11189 				clock.p2 = 2;
11190 		}
11191 
11192 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
11193 	}
11194 
11195 	/*
11196 	 * This value includes pixel_multiplier. We will use
11197 	 * port_clock to compute adjusted_mode.crtc_clock in the
11198 	 * encoder's get_config() function.
11199 	 */
11200 	pipe_config->port_clock = port_clock;
11201 }
11202 
11203 int intel_dotclock_calculate(int link_freq,
11204 			     const struct intel_link_m_n *m_n)
11205 {
11206 	/*
11207 	 * The calculation for the data clock is:
11208 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
11209 	 * But we want to avoid losing precison if possible, so:
11210 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
11211 	 *
11212 	 * and the link clock is simpler:
11213 	 * link_clock = (m * link_clock) / n
11214 	 */
11215 
11216 	if (!m_n->link_n)
11217 		return 0;
11218 
11219 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
11220 }
11221 
11222 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
11223 				   struct intel_crtc_state *pipe_config)
11224 {
11225 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11226 
11227 	/* read out port_clock from the DPLL */
11228 	i9xx_crtc_clock_get(crtc, pipe_config);
11229 
11230 	/*
	 * In case there is an active pipe without active ports,
	 * we still need a reasonable estimate for the dotclock.
	 * Calculate one based on the FDI configuration.
11234 	 */
11235 	pipe_config->base.adjusted_mode.crtc_clock =
11236 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11237 					 &pipe_config->fdi_m_n);
11238 }
11239 
11240 /* Returns the currently programmed mode of the given encoder. */
11241 struct drm_display_mode *
11242 intel_encoder_current_mode(struct intel_encoder *encoder)
11243 {
11244 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11245 	struct intel_crtc_state *crtc_state;
11246 	struct drm_display_mode *mode;
11247 	struct intel_crtc *crtc;
11248 	enum pipe pipe;
11249 
11250 	if (!encoder->get_hw_state(encoder, &pipe))
11251 		return NULL;
11252 
11253 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11254 
11255 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11256 	if (!mode)
11257 		return NULL;
11258 
11259 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11260 	if (!crtc_state) {
11261 		kfree(mode);
11262 		return NULL;
11263 	}
11264 
11265 	crtc_state->base.crtc = &crtc->base;
11266 
11267 	if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11268 		kfree(crtc_state);
11269 		kfree(mode);
11270 		return NULL;
11271 	}
11272 
11273 	encoder->get_config(encoder, crtc_state);
11274 
11275 	intel_mode_from_pipe_config(mode, crtc_state);
11276 
11277 	kfree(crtc_state);
11278 
11279 	return mode;
11280 }
11281 
11282 static void intel_crtc_destroy(struct drm_crtc *crtc)
11283 {
11284 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11285 
11286 	drm_crtc_cleanup(crtc);
11287 	kfree(intel_crtc);
11288 }
11289 
11290 /**
11291  * intel_wm_need_update - Check whether watermarks need updating
11292  * @cur: current plane state
11293  * @new: new plane state
11294  *
11295  * Check current plane state versus the new one to determine whether
11296  * watermarks need to be recalculated.
11297  *
 * Returns: true if the watermarks need to be recalculated, false otherwise.
11299  */
11300 static bool intel_wm_need_update(struct intel_plane_state *cur,
11301 				 struct intel_plane_state *new)
11302 {
11303 	/* Update watermarks on tiling or size changes. */
11304 	if (new->base.visible != cur->base.visible)
11305 		return true;
11306 
11307 	if (!cur->base.fb || !new->base.fb)
11308 		return false;
11309 
11310 	if (cur->base.fb->modifier != new->base.fb->modifier ||
11311 	    cur->base.rotation != new->base.rotation ||
11312 	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11313 	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11314 	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11315 	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11316 		return true;
11317 
11318 	return false;
11319 }
11320 
11321 static bool needs_scaling(const struct intel_plane_state *state)
11322 {
11323 	int src_w = drm_rect_width(&state->base.src) >> 16;
11324 	int src_h = drm_rect_height(&state->base.src) >> 16;
11325 	int dst_w = drm_rect_width(&state->base.dst);
11326 	int dst_h = drm_rect_height(&state->base.dst);
11327 
11328 	return (src_w != dst_w || src_h != dst_h);
11329 }
11330 
11331 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11332 				    struct drm_crtc_state *crtc_state,
11333 				    const struct intel_plane_state *old_plane_state,
11334 				    struct drm_plane_state *plane_state)
11335 {
11336 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11337 	struct drm_crtc *crtc = crtc_state->crtc;
11338 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11339 	struct intel_plane *plane = to_intel_plane(plane_state->plane);
11340 	struct drm_device *dev = crtc->dev;
11341 	struct drm_i915_private *dev_priv = to_i915(dev);
11342 	bool mode_changed = needs_modeset(crtc_state);
11343 	bool was_crtc_enabled = old_crtc_state->base.active;
11344 	bool is_crtc_enabled = crtc_state->active;
11345 	bool turn_off, turn_on, visible, was_visible;
11346 	struct drm_framebuffer *fb = plane_state->fb;
11347 	int ret;
11348 
11349 	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
11350 		ret = skl_update_scaler_plane(
11351 			to_intel_crtc_state(crtc_state),
11352 			to_intel_plane_state(plane_state));
11353 		if (ret)
11354 			return ret;
11355 	}
11356 
11357 	was_visible = old_plane_state->base.visible;
11358 	visible = plane_state->visible;
11359 
11360 	if (!was_crtc_enabled && WARN_ON(was_visible))
11361 		was_visible = false;
11362 
11363 	/*
11364 	 * Visibility is calculated as if the crtc was on, but
11365 	 * after scaler setup everything depends on it being off
11366 	 * when the crtc isn't active.
11367 	 *
11368 	 * FIXME this is wrong for watermarks. Watermarks should also
11369 	 * be computed as if the pipe would be active. Perhaps move
11370 	 * per-plane wm computation to the .check_plane() hook, and
11371 	 * only combine the results from all planes in the current place?
11372 	 */
11373 	if (!is_crtc_enabled) {
11374 		plane_state->visible = visible = false;
11375 		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
11376 		to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
11377 	}
11378 
11379 	if (!was_visible && !visible)
11380 		return 0;
11381 
11382 	if (fb != old_plane_state->base.fb)
11383 		pipe_config->fb_changed = true;
11384 
11385 	turn_off = was_visible && (!visible || mode_changed);
11386 	turn_on = visible && (!was_visible || mode_changed);
11387 
11388 	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11389 			 intel_crtc->base.base.id, intel_crtc->base.name,
11390 			 plane->base.base.id, plane->base.name,
11391 			 fb ? fb->base.id : -1);
11392 
11393 	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11394 			 plane->base.base.id, plane->base.name,
11395 			 was_visible, visible,
11396 			 turn_off, turn_on, mode_changed);
11397 
11398 	if (turn_on) {
11399 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11400 			pipe_config->update_wm_pre = true;
11401 
11402 		/* must disable cxsr around plane enable/disable */
11403 		if (plane->id != PLANE_CURSOR)
11404 			pipe_config->disable_cxsr = true;
11405 	} else if (turn_off) {
11406 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11407 			pipe_config->update_wm_post = true;
11408 
11409 		/* must disable cxsr around plane enable/disable */
11410 		if (plane->id != PLANE_CURSOR)
11411 			pipe_config->disable_cxsr = true;
11412 	} else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
11413 					to_intel_plane_state(plane_state))) {
11414 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
11415 			/* FIXME bollocks */
11416 			pipe_config->update_wm_pre = true;
11417 			pipe_config->update_wm_post = true;
11418 		}
11419 	}
11420 
11421 	if (visible || was_visible)
11422 		pipe_config->fb_bits |= plane->frontbuffer_bit;
11423 
11424 	/*
11425 	 * ILK/SNB DVSACNTR/Sprite Enable
11426 	 * IVB SPR_CTL/Sprite Enable
11427 	 * "When in Self Refresh Big FIFO mode, a write to enable the
11428 	 *  plane will be internally buffered and delayed while Big FIFO
11429 	 *  mode is exiting."
11430 	 *
11431 	 * Which means that enabling the sprite can take an extra frame
11432 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
11433 	 * down to LP0 and wait for vblank in order to make sure the
11434 	 * sprite gets enabled on the next vblank after the register write.
11435 	 * Doing otherwise would risk enabling the sprite one frame after
11436 	 * we've already signalled flip completion. We can resume LP1+
11437 	 * once the sprite has been enabled.
11438 	 *
11439 	 *
11440 	 * WaCxSRDisabledForSpriteScaling:ivb
11441 	 * IVB SPR_SCALE/Scaling Enable
11442 	 * "Low Power watermarks must be disabled for at least one
11443 	 *  frame before enabling sprite scaling, and kept disabled
11444 	 *  until sprite scaling is disabled."
11445 	 *
11446 	 * ILK/SNB DVSASCALE/Scaling Enable
11447 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
11448 	 *  masked off while Big FIFO mode is exiting."
11449 	 *
11450 	 * Despite the w/a only being listed for IVB we assume that
11451 	 * the ILK/SNB note has similar ramifications, hence we apply
11452 	 * the w/a on all three platforms.
11453 	 *
	 * Experimental results suggest this is needed also for the
	 * primary plane, not only the sprite plane.
11456 	 */
11457 	if (plane->id != PLANE_CURSOR &&
11458 	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
11459 	     IS_IVYBRIDGE(dev_priv)) &&
11460 	    (turn_on || (!needs_scaling(old_plane_state) &&
11461 			 needs_scaling(to_intel_plane_state(plane_state)))))
11462 		pipe_config->disable_lp_wm = true;
11463 
11464 	return 0;
11465 }
11466 
11467 static bool encoders_cloneable(const struct intel_encoder *a,
11468 			       const struct intel_encoder *b)
11469 {
11470 	/* masks could be asymmetric, so check both ways */
11471 	return a == b || (a->cloneable & (1 << b->type) &&
11472 			  b->cloneable & (1 << a->type));
11473 }
11474 
11475 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11476 					 struct intel_crtc *crtc,
11477 					 struct intel_encoder *encoder)
11478 {
11479 	struct intel_encoder *source_encoder;
11480 	struct drm_connector *connector;
11481 	struct drm_connector_state *connector_state;
11482 	int i;
11483 
11484 	for_each_new_connector_in_state(state, connector, connector_state, i) {
11485 		if (connector_state->crtc != &crtc->base)
11486 			continue;
11487 
11488 		source_encoder =
11489 			to_intel_encoder(connector_state->best_encoder);
11490 		if (!encoders_cloneable(encoder, source_encoder))
11491 			return false;
11492 	}
11493 
11494 	return true;
11495 }
11496 
11497 static int icl_add_linked_planes(struct intel_atomic_state *state)
11498 {
11499 	struct intel_plane *plane, *linked;
11500 	struct intel_plane_state *plane_state, *linked_plane_state;
11501 	int i;
11502 
11503 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11504 		linked = plane_state->linked_plane;
11505 
11506 		if (!linked)
11507 			continue;
11508 
11509 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
11510 		if (IS_ERR(linked_plane_state))
11511 			return PTR_ERR(linked_plane_state);
11512 
11513 		WARN_ON(linked_plane_state->linked_plane != plane);
11514 		WARN_ON(linked_plane_state->slave == plane_state->slave);
11515 	}
11516 
11517 	return 0;
11518 }
11519 
11520 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11521 {
11522 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11523 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11524 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11525 	struct intel_plane *plane, *linked;
11526 	struct intel_plane_state *plane_state;
11527 	int i;
11528 
11529 	if (INTEL_GEN(dev_priv) < 11)
11530 		return 0;
11531 
11532 	/*
11533 	 * Destroy all old plane links and make the slave plane invisible
11534 	 * in the crtc_state->active_planes mask.
11535 	 */
11536 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11537 		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11538 			continue;
11539 
11540 		plane_state->linked_plane = NULL;
11541 		if (plane_state->slave && !plane_state->base.visible) {
11542 			crtc_state->active_planes &= ~BIT(plane->id);
11543 			crtc_state->update_planes |= BIT(plane->id);
11544 		}
11545 
11546 		plane_state->slave = false;
11547 	}
11548 
11549 	if (!crtc_state->nv12_planes)
11550 		return 0;
11551 
11552 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11553 		struct intel_plane_state *linked_state = NULL;
11554 
11555 		if (plane->pipe != crtc->pipe ||
11556 		    !(crtc_state->nv12_planes & BIT(plane->id)))
11557 			continue;
11558 
11559 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11560 			if (!icl_is_nv12_y_plane(linked->id))
11561 				continue;
11562 
11563 			if (crtc_state->active_planes & BIT(linked->id))
11564 				continue;
11565 
11566 			linked_state = intel_atomic_get_plane_state(state, linked);
11567 			if (IS_ERR(linked_state))
11568 				return PTR_ERR(linked_state);
11569 
11570 			break;
11571 		}
11572 
11573 		if (!linked_state) {
11574 			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
11575 				      hweight8(crtc_state->nv12_planes));
11576 
11577 			return -EINVAL;
11578 		}
11579 
11580 		plane_state->linked_plane = linked;
11581 
11582 		linked_state->slave = true;
11583 		linked_state->linked_plane = plane;
11584 		crtc_state->active_planes |= BIT(linked->id);
11585 		crtc_state->update_planes |= BIT(linked->id);
11586 		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11587 	}
11588 
11589 	return 0;
11590 }
11591 
11592 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11593 {
11594 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11595 	struct intel_atomic_state *state =
11596 		to_intel_atomic_state(new_crtc_state->base.state);
11597 	const struct intel_crtc_state *old_crtc_state =
11598 		intel_atomic_get_old_crtc_state(state, crtc);
11599 
	/* Only the boolean "any C8 planes present?" transition matters here */
	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11601 }
11602 
11603 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11604 				   struct drm_crtc_state *crtc_state)
11605 {
11606 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11607 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11608 	struct intel_crtc_state *pipe_config =
11609 		to_intel_crtc_state(crtc_state);
11610 	int ret;
11611 	bool mode_changed = needs_modeset(crtc_state);
11612 
11613 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11614 	    mode_changed && !crtc_state->active)
11615 		pipe_config->update_wm_post = true;
11616 
11617 	if (mode_changed && crtc_state->enable &&
11618 	    dev_priv->display.crtc_compute_clock &&
11619 	    !WARN_ON(pipe_config->shared_dpll)) {
11620 		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11621 							   pipe_config);
11622 		if (ret)
11623 			return ret;
11624 	}
11625 
11626 	/*
11627 	 * May need to update pipe gamma enable bits
11628 	 * when C8 planes are getting enabled/disabled.
11629 	 */
11630 	if (c8_planes_changed(pipe_config))
11631 		crtc_state->color_mgmt_changed = true;
11632 
11633 	if (mode_changed || pipe_config->update_pipe ||
11634 	    crtc_state->color_mgmt_changed) {
11635 		ret = intel_color_check(pipe_config);
11636 		if (ret)
11637 			return ret;
11638 	}
11639 
11640 	ret = 0;
11641 	if (dev_priv->display.compute_pipe_wm) {
11642 		ret = dev_priv->display.compute_pipe_wm(pipe_config);
11643 		if (ret) {
11644 			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11645 			return ret;
11646 		}
11647 	}
11648 
11649 	if (dev_priv->display.compute_intermediate_wm) {
11650 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11651 			return 0;
11652 
11653 		/*
11654 		 * Calculate 'intermediate' watermarks that satisfy both the
11655 		 * old state and the new state.  We can program these
11656 		 * immediately.
11657 		 */
11658 		ret = dev_priv->display.compute_intermediate_wm(pipe_config);
11659 		if (ret) {
11660 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11661 			return ret;
11662 		}
11663 	}
11664 
11665 	if (INTEL_GEN(dev_priv) >= 9) {
11666 		if (mode_changed || pipe_config->update_pipe)
11667 			ret = skl_update_scaler_crtc(pipe_config);
11668 
11669 		if (!ret)
11670 			ret = icl_check_nv12_planes(pipe_config);
11671 		if (!ret)
11672 			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11673 							    pipe_config);
11674 		if (!ret)
11675 			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
11676 							 pipe_config);
11677 	}
11678 
11679 	if (HAS_IPS(dev_priv))
11680 		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11681 
11682 	return ret;
11683 }
11684 
11685 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11686 	.atomic_check = intel_crtc_atomic_check,
11687 };
11688 
11689 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11690 {
11691 	struct intel_connector *connector;
11692 	struct drm_connector_list_iter conn_iter;
11693 
11694 	drm_connector_list_iter_begin(dev, &conn_iter);
11695 	for_each_intel_connector_iter(connector, &conn_iter) {
11696 		if (connector->base.state->crtc)
11697 			drm_connector_put(&connector->base);
11698 
11699 		if (connector->base.encoder) {
11700 			connector->base.state->best_encoder =
11701 				connector->base.encoder;
11702 			connector->base.state->crtc =
11703 				connector->base.encoder->crtc;
11704 
11705 			drm_connector_get(&connector->base);
11706 		} else {
11707 			connector->base.state->best_encoder = NULL;
11708 			connector->base.state->crtc = NULL;
11709 		}
11710 	}
11711 	drm_connector_list_iter_end(&conn_iter);
11712 }
11713 
11714 static int
11715 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11716 		      struct intel_crtc_state *pipe_config)
11717 {
11718 	struct drm_connector *connector = conn_state->connector;
11719 	const struct drm_display_info *info = &connector->display_info;
11720 	int bpp;
11721 
11722 	switch (conn_state->max_bpc) {
11723 	case 6 ... 7:
11724 		bpp = 6 * 3;
11725 		break;
11726 	case 8 ... 9:
11727 		bpp = 8 * 3;
11728 		break;
11729 	case 10 ... 11:
11730 		bpp = 10 * 3;
11731 		break;
11732 	case 12:
11733 		bpp = 12 * 3;
11734 		break;
11735 	default:
11736 		return -EINVAL;
11737 	}
11738 
11739 	if (bpp < pipe_config->pipe_bpp) {
11740 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11741 			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11742 			      connector->base.id, connector->name,
11743 			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
11744 			      pipe_config->pipe_bpp);
11745 
11746 		pipe_config->pipe_bpp = bpp;
11747 	}
11748 
11749 	return 0;
11750 }
11751 
11752 static int
11753 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11754 			  struct intel_crtc_state *pipe_config)
11755 {
11756 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11757 	struct drm_atomic_state *state = pipe_config->base.state;
11758 	struct drm_connector *connector;
11759 	struct drm_connector_state *connector_state;
11760 	int bpp, i;
11761 
11762 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11763 	    IS_CHERRYVIEW(dev_priv)))
11764 		bpp = 10*3;
11765 	else if (INTEL_GEN(dev_priv) >= 5)
11766 		bpp = 12*3;
11767 	else
11768 		bpp = 8*3;
11769 
11770 	pipe_config->pipe_bpp = bpp;
11771 
11772 	/* Clamp display bpp to connector max bpp */
11773 	for_each_new_connector_in_state(state, connector, connector_state, i) {
11774 		int ret;
11775 
11776 		if (connector_state->crtc != &crtc->base)
11777 			continue;
11778 
11779 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11780 		if (ret)
11781 			return ret;
11782 	}
11783 
11784 	return 0;
11785 }
11786 
11787 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11788 {
11789 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11790 		      "type: 0x%x flags: 0x%x\n",
11791 		      mode->crtc_clock,
11792 		      mode->crtc_hdisplay, mode->crtc_hsync_start,
11793 		      mode->crtc_hsync_end, mode->crtc_htotal,
11794 		      mode->crtc_vdisplay, mode->crtc_vsync_start,
11795 		      mode->crtc_vsync_end, mode->crtc_vtotal,
11796 		      mode->type, mode->flags);
11797 }
11798 
11799 static inline void
11800 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
11801 		      const char *id, unsigned int lane_count,
11802 		      const struct intel_link_m_n *m_n)
11803 {
11804 	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11805 		      id, lane_count,
11806 		      m_n->gmch_m, m_n->gmch_n,
11807 		      m_n->link_m, m_n->link_n, m_n->tu);
11808 }
11809 
11810 static void
11811 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11812 		     const union hdmi_infoframe *frame)
11813 {
11814 	if ((drm_debug & DRM_UT_KMS) == 0)
11815 		return;
11816 
11817 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11818 }
11819 
11820 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11821 
11822 static const char * const output_type_str[] = {
11823 	OUTPUT_TYPE(UNUSED),
11824 	OUTPUT_TYPE(ANALOG),
11825 	OUTPUT_TYPE(DVO),
11826 	OUTPUT_TYPE(SDVO),
11827 	OUTPUT_TYPE(LVDS),
11828 	OUTPUT_TYPE(TVOUT),
11829 	OUTPUT_TYPE(HDMI),
11830 	OUTPUT_TYPE(DP),
11831 	OUTPUT_TYPE(EDP),
11832 	OUTPUT_TYPE(DSI),
11833 	OUTPUT_TYPE(DDI),
11834 	OUTPUT_TYPE(DP_MST),
11835 };
11836 
11837 #undef OUTPUT_TYPE
11838 
11839 static void snprintf_output_types(char *buf, size_t len,
11840 				  unsigned int output_types)
11841 {
11842 	char *str = buf;
11843 	int i;
11844 
11845 	str[0] = '\0';
11846 
11847 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11848 		int r;
11849 
11850 		if ((output_types & BIT(i)) == 0)
11851 			continue;
11852 
11853 		r = snprintf(str, len, "%s%s",
11854 			     str != buf ? "," : "", output_type_str[i]);
11855 		if (r >= len)
11856 			break;
11857 		str += r;
11858 		len -= r;
11859 
11860 		output_types &= ~BIT(i);
11861 	}
11862 
11863 	WARN_ON_ONCE(output_types != 0);
11864 }
11865 
11866 static const char * const output_format_str[] = {
11867 	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11868 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
11869 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
11870 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
11871 };
11872 
11873 static const char *output_formats(enum intel_output_format format)
11874 {
11875 	if (format >= ARRAY_SIZE(output_format_str))
11876 		format = INTEL_OUTPUT_FORMAT_INVALID;
11877 	return output_format_str[format];
11878 }
11879 
11880 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
11881 {
11882 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
11883 	const struct drm_framebuffer *fb = plane_state->base.fb;
11884 	struct drm_format_name_buf format_name;
11885 
11886 	if (!fb) {
11887 		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
11888 			      plane->base.base.id, plane->base.name,
11889 			      yesno(plane_state->base.visible));
11890 		return;
11891 	}
11892 
11893 	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
11894 		      plane->base.base.id, plane->base.name,
11895 		      fb->base.id, fb->width, fb->height,
11896 		      drm_get_format_name(fb->format->format, &format_name),
11897 		      yesno(plane_state->base.visible));
11898 	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
11899 		      plane_state->base.rotation, plane_state->scaler_id);
11900 	if (plane_state->base.visible)
11901 		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
11902 			      DRM_RECT_FP_ARG(&plane_state->base.src),
11903 			      DRM_RECT_ARG(&plane_state->base.dst));
11904 }
11905 
11906 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
11907 				   struct intel_atomic_state *state,
11908 				   const char *context)
11909 {
11910 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
11911 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11912 	const struct intel_plane_state *plane_state;
11913 	struct intel_plane *plane;
11914 	char buf[64];
11915 	int i;
11916 
11917 	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
11918 		      crtc->base.base.id, crtc->base.name,
11919 		      yesno(pipe_config->base.enable), context);
11920 
11921 	if (!pipe_config->base.enable)
11922 		goto dump_planes;
11923 
11924 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
11925 	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
11926 		      yesno(pipe_config->base.active),
11927 		      buf, pipe_config->output_types,
11928 		      output_formats(pipe_config->output_format));
11929 
11930 	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11931 		      transcoder_name(pipe_config->cpu_transcoder),
11932 		      pipe_config->pipe_bpp, pipe_config->dither);
11933 
11934 	if (pipe_config->has_pch_encoder)
11935 		intel_dump_m_n_config(pipe_config, "fdi",
11936 				      pipe_config->fdi_lanes,
11937 				      &pipe_config->fdi_m_n);
11938 
11939 	if (intel_crtc_has_dp_encoder(pipe_config)) {
11940 		intel_dump_m_n_config(pipe_config, "dp m_n",
11941 				pipe_config->lane_count, &pipe_config->dp_m_n);
11942 		if (pipe_config->has_drrs)
11943 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
11944 					      pipe_config->lane_count,
11945 					      &pipe_config->dp_m2_n2);
11946 	}
11947 
11948 	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
11949 		      pipe_config->has_audio, pipe_config->has_infoframe,
11950 		      pipe_config->infoframes.enable);
11951 
11952 	if (pipe_config->infoframes.enable &
11953 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
11954 		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
11955 	if (pipe_config->infoframes.enable &
11956 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
11957 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
11958 	if (pipe_config->infoframes.enable &
11959 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
11960 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
11961 	if (pipe_config->infoframes.enable &
11962 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
11963 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
11964 
11965 	DRM_DEBUG_KMS("requested mode:\n");
11966 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
11967 	DRM_DEBUG_KMS("adjusted mode:\n");
11968 	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11969 	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11970 	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
11971 		      pipe_config->port_clock,
11972 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11973 		      pipe_config->pixel_rate);
11974 
11975 	if (INTEL_GEN(dev_priv) >= 9)
11976 		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11977 			      crtc->num_scalers,
11978 			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);
11980 
11981 	if (HAS_GMCH(dev_priv))
11982 		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11983 			      pipe_config->gmch_pfit.control,
11984 			      pipe_config->gmch_pfit.pgm_ratios,
11985 			      pipe_config->gmch_pfit.lvds_border_bits);
11986 	else
11987 		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
11988 			      pipe_config->pch_pfit.pos,
11989 			      pipe_config->pch_pfit.size,
11990 			      enableddisabled(pipe_config->pch_pfit.enabled),
11991 			      yesno(pipe_config->pch_pfit.force_thru));
11992 
11993 	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11994 		      pipe_config->ips_enabled, pipe_config->double_wide);
11995 
11996 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
11997 
11998 dump_planes:
11999 	if (!state)
12000 		return;
12001 
12002 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12003 		if (plane->pipe == crtc->pipe)
12004 			intel_dump_plane_state(plane_state);
12005 	}
12006 }
12007 
12008 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
12009 {
12010 	struct drm_device *dev = state->base.dev;
12011 	struct drm_connector *connector;
12012 	struct drm_connector_list_iter conn_iter;
12013 	unsigned int used_ports = 0;
12014 	unsigned int used_mst_ports = 0;
12015 	bool ret = true;
12016 
12017 	/*
12018 	 * Walk the connector list instead of the encoder
12019 	 * list to detect the problem on ddi platforms
12020 	 * where there's just one encoder per digital port.
12021 	 */
12022 	drm_connector_list_iter_begin(dev, &conn_iter);
12023 	drm_for_each_connector_iter(connector, &conn_iter) {
12024 		struct drm_connector_state *connector_state;
12025 		struct intel_encoder *encoder;
12026 
12027 		connector_state =
12028 			drm_atomic_get_new_connector_state(&state->base,
12029 							   connector);
12030 		if (!connector_state)
12031 			connector_state = connector->state;
12032 
12033 		if (!connector_state->best_encoder)
12034 			continue;
12035 
12036 		encoder = to_intel_encoder(connector_state->best_encoder);
12037 
12038 		WARN_ON(!connector_state->crtc);
12039 
12040 		switch (encoder->type) {
12041 			unsigned int port_mask;
12042 		case INTEL_OUTPUT_DDI:
12043 			if (WARN_ON(!HAS_DDI(to_i915(dev))))
12044 				break;
12045 			/* else: fall through */
12046 		case INTEL_OUTPUT_DP:
12047 		case INTEL_OUTPUT_HDMI:
12048 		case INTEL_OUTPUT_EDP:
12049 			port_mask = 1 << encoder->port;
12050 
12051 			/* the same port mustn't appear more than once */
12052 			if (used_ports & port_mask)
12053 				ret = false;
12054 
12055 			used_ports |= port_mask;
12056 			break;
12057 		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |= 1 << encoder->port;
12060 			break;
12061 		default:
12062 			break;
12063 		}
12064 	}
12065 	drm_connector_list_iter_end(&conn_iter);
12066 
12067 	/* can't mix MST and SST/HDMI on the same port */
12068 	if (used_ports & used_mst_ports)
12069 		return false;
12070 
12071 	return ret;
12072 }
12073 
12074 static int
12075 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12076 {
12077 	struct drm_i915_private *dev_priv =
12078 		to_i915(crtc_state->base.crtc->dev);
12079 	struct intel_crtc_state *saved_state;
12080 
12081 	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12082 	if (!saved_state)
12083 		return -ENOMEM;
12084 
	/*
	 * FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved.
	 */
12089 
12090 	saved_state->scaler_state = crtc_state->scaler_state;
12091 	saved_state->shared_dpll = crtc_state->shared_dpll;
12092 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
12093 	saved_state->crc_enabled = crtc_state->crc_enabled;
12094 	if (IS_G4X(dev_priv) ||
12095 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12096 		saved_state->wm = crtc_state->wm;
12097 
	/*
	 * Keep the base drm_crtc_state intact and only clear our extended
	 * struct: "base" is the first member (which the BUILD_BUG_ON()
	 * enforces), so &crtc_state->base + 1 points at the first byte of
	 * the i915-private extension.
	 */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));
12102 
12103 	kfree(saved_state);
12104 	return 0;
12105 }
12106 
12107 static int
12108 intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
12109 {
12110 	struct drm_crtc *crtc = pipe_config->base.crtc;
12111 	struct drm_atomic_state *state = pipe_config->base.state;
12112 	struct intel_encoder *encoder;
12113 	struct drm_connector *connector;
12114 	struct drm_connector_state *connector_state;
12115 	int base_bpp, ret;
12116 	int i;
12117 	bool retry = true;
12118 
12119 	ret = clear_intel_crtc_state(pipe_config);
12120 	if (ret)
12121 		return ret;
12122 
12123 	pipe_config->cpu_transcoder =
12124 		(enum transcoder) to_intel_crtc(crtc)->pipe;
12125 
12126 	/*
12127 	 * Sanitize sync polarity flags based on requested ones. If neither
12128 	 * positive or negative polarity is requested, treat this as meaning
12129 	 * negative polarity.
12130 	 */
12131 	if (!(pipe_config->base.adjusted_mode.flags &
12132 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12133 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12134 
12135 	if (!(pipe_config->base.adjusted_mode.flags &
12136 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12137 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12138 
12139 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12140 					pipe_config);
12141 	if (ret)
12142 		return ret;
12143 
12144 	base_bpp = pipe_config->pipe_bpp;
12145 
12146 	/*
12147 	 * Determine the real pipe dimensions. Note that stereo modes can
12148 	 * increase the actual pipe size due to the frame doubling and
12149 	 * insertion of additional space for blanks between the frame. This
12150 	 * is stored in the crtc timings. We use the requested mode to do this
12151 	 * computation to clearly distinguish it from the adjusted mode, which
12152 	 * can be changed by the connectors in the below retry loop.
12153 	 */
12154 	drm_mode_get_hv_timing(&pipe_config->base.mode,
12155 			       &pipe_config->pipe_src_w,
12156 			       &pipe_config->pipe_src_h);
12157 
12158 	for_each_new_connector_in_state(state, connector, connector_state, i) {
12159 		if (connector_state->crtc != crtc)
12160 			continue;
12161 
12162 		encoder = to_intel_encoder(connector_state->best_encoder);
12163 
12164 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12165 			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12166 			return -EINVAL;
12167 		}
12168 
12169 		/*
12170 		 * Determine output_types before calling the .compute_config()
12171 		 * hooks so that the hooks can use this information safely.
12172 		 */
12173 		if (encoder->compute_output_type)
12174 			pipe_config->output_types |=
12175 				BIT(encoder->compute_output_type(encoder, pipe_config,
12176 								 connector_state));
12177 		else
12178 			pipe_config->output_types |= BIT(encoder->type);
12179 	}
12180 
12181 encoder_retry:
12182 	/* Ensure the port clock defaults are reset when retrying. */
12183 	pipe_config->port_clock = 0;
12184 	pipe_config->pixel_multiplier = 1;
12185 
12186 	/* Fill in default crtc timings, allow encoders to overwrite them. */
12187 	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12188 			      CRTC_STEREO_DOUBLE);
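	/*
	 * CRTC_STEREO_DOUBLE makes frame-packed stereo modes fill in the
	 * doubled crtc_* timings, consistent with the pipe src sizing done
	 * above via drm_mode_get_hv_timing().
	 */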
12189 
12190 	/* Pass our mode to the connectors and the CRTC to give them a chance to
12191 	 * adjust it according to limitations or connector properties, and also
12192 	 * a chance to reject the mode entirely.
12193 	 */
12194 	for_each_new_connector_in_state(state, connector, connector_state, i) {
12195 		if (connector_state->crtc != crtc)
12196 			continue;
12197 
12198 		encoder = to_intel_encoder(connector_state->best_encoder);
12199 		ret = encoder->compute_config(encoder, pipe_config,
12200 					      connector_state);
12201 		if (ret < 0) {
12202 			if (ret != -EDEADLK)
12203 				DRM_DEBUG_KMS("Encoder config failure: %d\n",
12204 					      ret);
12205 			return ret;
12206 		}
12207 	}
12208 
12209 	/* Set default port clock if not overwritten by the encoder. Needs to be
12210 	 * done afterwards in case the encoder adjusts the mode. */
12211 	if (!pipe_config->port_clock)
12212 		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12213 			* pipe_config->pixel_multiplier;
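	/*
	 * E.g. SDVO runs low dotclocks at a 2x or 4x pixel multiplier, so a
	 * 40000 kHz adjusted mode with pixel_multiplier == 2 ends up with an
	 * 80000 kHz port clock here.
	 */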
12214 
12215 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12216 	if (ret == -EDEADLK)
12217 		return ret;
12218 	if (ret < 0) {
12219 		DRM_DEBUG_KMS("CRTC fixup failed\n");
12220 		return ret;
12221 	}
12222 
12223 	if (ret == RETRY) {
12224 		if (WARN(!retry, "loop in pipe configuration computation\n"))
12225 			return -EINVAL;
12226 
12227 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12228 		retry = false;
12229 		goto encoder_retry;
12230 	}
12231 
	/*
	 * Dithering seems to not pass through bits correctly when it should,
	 * so only enable it on 6bpc panels and when it's not a compliance
	 * test requesting 6bpc video pattern.
	 */
12236 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12237 		!pipe_config->dither_force_disable;
12238 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12239 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12240 
12241 	return 0;
12242 }
12243 
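/*
 * Fuzzy clock comparison: two clocks match when their difference is below
 * ~5% of their sum. E.g. 100000 vs 104000 kHz passes, since
 * (4000 + 204000) * 100 / 204000 = 101 < 105, while 100000 vs 120000 kHz
 * fails with (20000 + 220000) * 100 / 220000 = 109.
 */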
12244 bool intel_fuzzy_clock_check(int clock1, int clock2)
12245 {
12246 	int diff;
12247 
12248 	if (clock1 == clock2)
12249 		return true;
12250 
12251 	if (!clock1 || !clock2)
12252 		return false;
12253 
12254 	diff = abs(clock1 - clock2);
12255 
	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12257 		return true;
12258 
12259 	return false;
12260 }
12261 
12262 static bool
12263 intel_compare_m_n(unsigned int m, unsigned int n,
12264 		  unsigned int m2, unsigned int n2,
12265 		  bool exact)
12266 {
12267 	if (m == m2 && n == n2)
12268 		return true;
12269 
12270 	if (exact || !m || !n || !m2 || !n2)
12271 		return false;
12272 
12273 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12274 
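	/*
	 * Scale whichever ratio has the smaller denominator up by powers of
	 * two until the denominators are comparable; e.g. comparing 1/2
	 * against 3/8 first doubles 1/2 to 4/8 and then fuzzy-compares the
	 * numerators.
	 */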
12275 	if (n > n2) {
12276 		while (n > n2) {
12277 			m2 <<= 1;
12278 			n2 <<= 1;
12279 		}
12280 	} else if (n < n2) {
12281 		while (n < n2) {
12282 			m <<= 1;
12283 			n <<= 1;
12284 		}
12285 	}
12286 
12287 	if (n != n2)
12288 		return false;
12289 
12290 	return intel_fuzzy_clock_check(m, m2);
12291 }
12292 
12293 static bool
12294 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12295 		       const struct intel_link_m_n *m2_n2,
12296 		       bool exact)
12297 {
12298 	return m_n->tu == m2_n2->tu &&
12299 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12300 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12301 		intel_compare_m_n(m_n->link_m, m_n->link_n,
12302 				  m2_n2->link_m, m2_n2->link_n, exact);
12303 }
12304 
12305 static bool
12306 intel_compare_infoframe(const union hdmi_infoframe *a,
12307 			const union hdmi_infoframe *b)
12308 {
12309 	return memcmp(a, b, sizeof(*a)) == 0;
12310 }
12311 
12312 static void
12313 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12314 			       bool fastset, const char *name,
12315 			       const union hdmi_infoframe *a,
12316 			       const union hdmi_infoframe *b)
12317 {
12318 	if (fastset) {
12319 		if ((drm_debug & DRM_UT_KMS) == 0)
12320 			return;
12321 
		drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe\n", name);
		drm_dbg(DRM_UT_KMS, "expected:\n");
		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
		drm_dbg(DRM_UT_KMS, "found:\n");
12326 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12327 	} else {
		drm_err("mismatch in %s infoframe\n", name);
		drm_err("expected:\n");
		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
		drm_err("found:\n");
12332 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12333 	}
12334 }
12335 
12336 static void __printf(3, 4)
12337 pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
12338 {
12339 	struct va_format vaf;
12340 	va_list args;
12341 
12342 	va_start(args, format);
12343 	vaf.fmt = format;
12344 	vaf.va = &args;
12345 
12346 	if (fastset)
12347 		drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
12348 	else
12349 		drm_err("mismatch in %s %pV", name, &vaf);
12350 
12351 	va_end(args);
12352 }
12353 
12354 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12355 {
12356 	if (i915_modparams.fastboot != -1)
12357 		return i915_modparams.fastboot;
12358 
12359 	/* Enable fastboot by default on Skylake and newer */
12360 	if (INTEL_GEN(dev_priv) >= 9)
12361 		return true;
12362 
12363 	/* Enable fastboot by default on VLV and CHV */
12364 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12365 		return true;
12366 
12367 	/* Disabled by default on all others */
12368 	return false;
12369 }
12370 
12371 static bool
12372 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
12373 			  const struct intel_crtc_state *pipe_config,
12374 			  bool fastset)
12375 {
12376 	struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
12377 	bool ret = true;
12378 	bool fixup_inherited = fastset &&
12379 		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12380 		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12381 
12382 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12383 		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12384 		ret = false;
12385 	}
12386 
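/*
 * The PIPE_CONF_CHECK_* macros compare one field of current_config (the
 * expected sw-tracked state) against pipe_config (the state read back from
 * hw), log any mismatch and mark the compare as failed; e.g.
 * PIPE_CONF_CHECK_I(lane_count) would log
 * "mismatch in lane_count (expected 4, found 2)".
 */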
12387 #define PIPE_CONF_CHECK_X(name) do { \
12388 	if (current_config->name != pipe_config->name) { \
12389 		pipe_config_mismatch(fastset, __stringify(name), \
12390 				     "(expected 0x%08x, found 0x%08x)\n", \
12391 				     current_config->name, \
12392 				     pipe_config->name); \
12393 		ret = false; \
12394 	} \
12395 } while (0)
12396 
12397 #define PIPE_CONF_CHECK_I(name) do { \
12398 	if (current_config->name != pipe_config->name) { \
12399 		pipe_config_mismatch(fastset, __stringify(name), \
12400 				     "(expected %i, found %i)\n", \
12401 				     current_config->name, \
12402 				     pipe_config->name); \
12403 		ret = false; \
12404 	} \
12405 } while (0)
12406 
12407 #define PIPE_CONF_CHECK_BOOL(name) do { \
12408 	if (current_config->name != pipe_config->name) { \
12409 		pipe_config_mismatch(fastset, __stringify(name), \
12410 				     "(expected %s, found %s)\n", \
12411 				     yesno(current_config->name), \
12412 				     yesno(pipe_config->name)); \
12413 		ret = false; \
12414 	} \
12415 } while (0)
12416 
12417 /*
12418  * Checks state where we only read out the enabling, but not the entire
12419  * state itself (like full infoframes or ELD for audio). These states
12420  * require a full modeset on bootup to fix up.
12421  */
12422 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12423 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12424 		PIPE_CONF_CHECK_BOOL(name); \
12425 	} else { \
12426 		pipe_config_mismatch(fastset, __stringify(name), \
12427 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12428 				     yesno(current_config->name), \
12429 				     yesno(pipe_config->name)); \
12430 		ret = false; \
12431 	} \
12432 } while (0)
12433 
12434 #define PIPE_CONF_CHECK_P(name) do { \
12435 	if (current_config->name != pipe_config->name) { \
12436 		pipe_config_mismatch(fastset, __stringify(name), \
12437 				     "(expected %p, found %p)\n", \
12438 				     current_config->name, \
12439 				     pipe_config->name); \
12440 		ret = false; \
12441 	} \
12442 } while (0)
12443 
12444 #define PIPE_CONF_CHECK_M_N(name) do { \
12445 	if (!intel_compare_link_m_n(&current_config->name, \
12446 				    &pipe_config->name,\
12447 				    !fastset)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i gmch %i/%i link %i/%i)\n", \
12451 				     current_config->name.tu, \
12452 				     current_config->name.gmch_m, \
12453 				     current_config->name.gmch_n, \
12454 				     current_config->name.link_m, \
12455 				     current_config->name.link_n, \
12456 				     pipe_config->name.tu, \
12457 				     pipe_config->name.gmch_m, \
12458 				     pipe_config->name.gmch_n, \
12459 				     pipe_config->name.link_m, \
12460 				     pipe_config->name.link_n); \
12461 		ret = false; \
12462 	} \
12463 } while (0)
12464 
12465 /* This is required for BDW+ where there is only one set of registers for
12466  * switching between high and low RR.
12467  * This macro can be used whenever a comparison has to be made between one
12468  * hw state and multiple sw state variables.
12469  */
12470 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12471 	if (!intel_compare_link_m_n(&current_config->name, \
12472 				    &pipe_config->name, !fastset) && \
12473 	    !intel_compare_link_m_n(&current_config->alt_name, \
12474 				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i gmch %i/%i link %i/%i)\n", \
12479 				     current_config->name.tu, \
12480 				     current_config->name.gmch_m, \
12481 				     current_config->name.gmch_n, \
12482 				     current_config->name.link_m, \
12483 				     current_config->name.link_n, \
12484 				     current_config->alt_name.tu, \
12485 				     current_config->alt_name.gmch_m, \
12486 				     current_config->alt_name.gmch_n, \
12487 				     current_config->alt_name.link_m, \
12488 				     current_config->alt_name.link_n, \
12489 				     pipe_config->name.tu, \
12490 				     pipe_config->name.gmch_m, \
12491 				     pipe_config->name.gmch_n, \
12492 				     pipe_config->name.link_m, \
12493 				     pipe_config->name.link_n); \
12494 		ret = false; \
12495 	} \
12496 } while (0)
12497 
12498 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12499 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12500 		pipe_config_mismatch(fastset, __stringify(name), \
12501 				     "(%x) (expected %i, found %i)\n", \
12502 				     (mask), \
12503 				     current_config->name & (mask), \
12504 				     pipe_config->name & (mask)); \
12505 		ret = false; \
12506 	} \
12507 } while (0)
12508 
12509 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12510 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12511 		pipe_config_mismatch(fastset, __stringify(name), \
12512 				     "(expected %i, found %i)\n", \
12513 				     current_config->name, \
12514 				     pipe_config->name); \
12515 		ret = false; \
12516 	} \
12517 } while (0)
12518 
12519 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12520 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
12521 				     &pipe_config->infoframes.name)) { \
12522 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
12523 					       &current_config->infoframes.name, \
12524 					       &pipe_config->infoframes.name); \
12525 		ret = false; \
12526 	} \
12527 } while (0)
12528 
12529 #define PIPE_CONF_QUIRK(quirk) \
12530 	((current_config->quirks | pipe_config->quirks) & (quirk))
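/*
 * PIPE_CONF_QUIRK() tests whether either state carries a given quirk; it is
 * used below to skip checks, e.g. the sync polarity flags, that cannot be
 * verified reliably on the affected configurations.
 */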
12531 
12532 	PIPE_CONF_CHECK_I(cpu_transcoder);
12533 
12534 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12535 	PIPE_CONF_CHECK_I(fdi_lanes);
12536 	PIPE_CONF_CHECK_M_N(fdi_m_n);
12537 
12538 	PIPE_CONF_CHECK_I(lane_count);
12539 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12540 
12541 	if (INTEL_GEN(dev_priv) < 8) {
12542 		PIPE_CONF_CHECK_M_N(dp_m_n);
12543 
12544 		if (current_config->has_drrs)
12545 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else {
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
	}
12548 
12549 	PIPE_CONF_CHECK_X(output_types);
12550 
12551 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12552 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12553 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12554 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12555 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12556 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12557 
12558 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12559 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12560 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12561 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12562 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12563 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12564 
12565 	PIPE_CONF_CHECK_I(pixel_multiplier);
12566 	PIPE_CONF_CHECK_I(output_format);
12567 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12568 	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12569 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12570 		PIPE_CONF_CHECK_BOOL(limited_color_range);
12571 
12572 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12573 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12574 	PIPE_CONF_CHECK_BOOL(has_infoframe);
12575 
12576 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12577 
12578 	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12579 			      DRM_MODE_FLAG_INTERLACE);
12580 
12581 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12582 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12583 				      DRM_MODE_FLAG_PHSYNC);
12584 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12585 				      DRM_MODE_FLAG_NHSYNC);
12586 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12587 				      DRM_MODE_FLAG_PVSYNC);
12588 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12589 				      DRM_MODE_FLAG_NVSYNC);
12590 	}
12591 
12592 	PIPE_CONF_CHECK_X(gmch_pfit.control);
12593 	/* pfit ratios are autocomputed by the hw on gen4+ */
12594 	if (INTEL_GEN(dev_priv) < 4)
12595 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12596 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12597 
12598 	/*
12599 	 * Changing the EDP transcoder input mux
12600 	 * (A_ONOFF vs. A_ON) requires a full modeset.
12601 	 */
12602 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
12603 
12604 	if (!fastset) {
12605 		PIPE_CONF_CHECK_I(pipe_src_w);
12606 		PIPE_CONF_CHECK_I(pipe_src_h);
12607 
12608 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12609 		if (current_config->pch_pfit.enabled) {
12610 			PIPE_CONF_CHECK_X(pch_pfit.pos);
12611 			PIPE_CONF_CHECK_X(pch_pfit.size);
12612 		}
12613 
12614 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12615 		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12616 
12617 		PIPE_CONF_CHECK_X(gamma_mode);
12618 		if (IS_CHERRYVIEW(dev_priv))
12619 			PIPE_CONF_CHECK_X(cgm_mode);
12620 		else
12621 			PIPE_CONF_CHECK_X(csc_mode);
12622 		PIPE_CONF_CHECK_BOOL(gamma_enable);
12623 		PIPE_CONF_CHECK_BOOL(csc_enable);
12624 	}
12625 
12626 	PIPE_CONF_CHECK_BOOL(double_wide);
12627 
12628 	PIPE_CONF_CHECK_P(shared_dpll);
12629 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12630 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12631 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12632 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12633 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12634 	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12635 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12636 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12637 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12638 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12639 	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12640 	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12641 	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12642 	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12643 	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12644 	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12645 	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12646 	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12647 	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12648 	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12649 	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12650 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12651 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12652 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12653 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12654 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12655 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12656 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12657 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12658 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12659 	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12660 
12661 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12662 	PIPE_CONF_CHECK_X(dsi_pll.div);
12663 
12664 	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12665 		PIPE_CONF_CHECK_I(pipe_bpp);
12666 
12667 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12668 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12669 
12670 	PIPE_CONF_CHECK_I(min_voltage_level);
12671 
12672 	PIPE_CONF_CHECK_X(infoframes.enable);
12673 	PIPE_CONF_CHECK_X(infoframes.gcp);
12674 	PIPE_CONF_CHECK_INFOFRAME(avi);
12675 	PIPE_CONF_CHECK_INFOFRAME(spd);
12676 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
12677 	PIPE_CONF_CHECK_INFOFRAME(drm);
12678 
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_M_N
#undef PIPE_CONF_CHECK_M_N_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_INFOFRAME
#undef PIPE_CONF_QUIRK
12687 
12688 	return ret;
12689 }
12690 
12691 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12692 					   const struct intel_crtc_state *pipe_config)
12693 {
12694 	if (pipe_config->has_pch_encoder) {
12695 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12696 							    &pipe_config->fdi_m_n);
12697 		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12698 
12699 		/*
12700 		 * FDI already provided one idea for the dotclock.
12701 		 * Yell if the encoder disagrees.
12702 		 */
12703 		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12704 		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12705 		     fdi_dotclock, dotclock);
12706 	}
12707 }
12708 
12709 static void verify_wm_state(struct drm_crtc *crtc,
12710 			    struct drm_crtc_state *new_state)
12711 {
12712 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
12713 	struct skl_hw_state {
12714 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12715 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12716 		struct skl_ddb_allocation ddb;
12717 		struct skl_pipe_wm wm;
12718 	} *hw;
12719 	struct skl_ddb_allocation *sw_ddb;
12720 	struct skl_pipe_wm *sw_wm;
12721 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
12722 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12723 	const enum pipe pipe = intel_crtc->pipe;
12724 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
12725 
12726 	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
12727 		return;
12728 
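	/* The hw state snapshot is too large for the stack, so buffer it on the heap. */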
12729 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12730 	if (!hw)
12731 		return;
12732 
12733 	skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
12734 	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
12735 
12736 	skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
12737 
12738 	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
12739 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12740 
12741 	if (INTEL_GEN(dev_priv) >= 11 &&
12742 	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12743 		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12744 			  sw_ddb->enabled_slices,
12745 			  hw->ddb.enabled_slices);
12746 
12747 	/* planes */
12748 	for_each_universal_plane(dev_priv, pipe, plane) {
12749 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12750 
12751 		hw_plane_wm = &hw->wm.planes[plane];
12752 		sw_plane_wm = &sw_wm->planes[plane];
12753 
12754 		/* Watermarks */
12755 		for (level = 0; level <= max_level; level++) {
12756 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12757 						&sw_plane_wm->wm[level]))
12758 				continue;
12759 
12760 			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12761 				  pipe_name(pipe), plane + 1, level,
12762 				  sw_plane_wm->wm[level].plane_en,
12763 				  sw_plane_wm->wm[level].plane_res_b,
12764 				  sw_plane_wm->wm[level].plane_res_l,
12765 				  hw_plane_wm->wm[level].plane_en,
12766 				  hw_plane_wm->wm[level].plane_res_b,
12767 				  hw_plane_wm->wm[level].plane_res_l);
12768 		}
12769 
12770 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12771 					 &sw_plane_wm->trans_wm)) {
12772 			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12773 				  pipe_name(pipe), plane + 1,
12774 				  sw_plane_wm->trans_wm.plane_en,
12775 				  sw_plane_wm->trans_wm.plane_res_b,
12776 				  sw_plane_wm->trans_wm.plane_res_l,
12777 				  hw_plane_wm->trans_wm.plane_en,
12778 				  hw_plane_wm->trans_wm.plane_res_b,
12779 				  hw_plane_wm->trans_wm.plane_res_l);
12780 		}
12781 
12782 		/* DDB */
12783 		hw_ddb_entry = &hw->ddb_y[plane];
12784 		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
12785 
12786 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12787 			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
12788 				  pipe_name(pipe), plane + 1,
12789 				  sw_ddb_entry->start, sw_ddb_entry->end,
12790 				  hw_ddb_entry->start, hw_ddb_entry->end);
12791 		}
12792 	}
12793 
12794 	/*
12795 	 * cursor
12796 	 * If the cursor plane isn't active, we may not have updated it's ddb
12797 	 * allocation. In that case since the ddb allocation will be updated
12798 	 * once the plane becomes visible, we can skip this check
12799 	 */
12800 	if (1) {
12801 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12802 
12803 		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
12804 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12805 
12806 		/* Watermarks */
12807 		for (level = 0; level <= max_level; level++) {
12808 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12809 						&sw_plane_wm->wm[level]))
12810 				continue;
12811 
12812 			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12813 				  pipe_name(pipe), level,
12814 				  sw_plane_wm->wm[level].plane_en,
12815 				  sw_plane_wm->wm[level].plane_res_b,
12816 				  sw_plane_wm->wm[level].plane_res_l,
12817 				  hw_plane_wm->wm[level].plane_en,
12818 				  hw_plane_wm->wm[level].plane_res_b,
12819 				  hw_plane_wm->wm[level].plane_res_l);
12820 		}
12821 
12822 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12823 					 &sw_plane_wm->trans_wm)) {
12824 			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12825 				  pipe_name(pipe),
12826 				  sw_plane_wm->trans_wm.plane_en,
12827 				  sw_plane_wm->trans_wm.plane_res_b,
12828 				  sw_plane_wm->trans_wm.plane_res_l,
12829 				  hw_plane_wm->trans_wm.plane_en,
12830 				  hw_plane_wm->trans_wm.plane_res_b,
12831 				  hw_plane_wm->trans_wm.plane_res_l);
12832 		}
12833 
12834 		/* DDB */
12835 		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
12836 		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
12837 
12838 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
12839 			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
12840 				  pipe_name(pipe),
12841 				  sw_ddb_entry->start, sw_ddb_entry->end,
12842 				  hw_ddb_entry->start, hw_ddb_entry->end);
12843 		}
12844 	}
12845 
12846 	kfree(hw);
12847 }
12848 
12849 static void
12850 verify_connector_state(struct drm_device *dev,
12851 		       struct drm_atomic_state *state,
12852 		       struct drm_crtc *crtc)
12853 {
12854 	struct drm_connector *connector;
12855 	struct drm_connector_state *new_conn_state;
12856 	int i;
12857 
12858 	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
12859 		struct drm_encoder *encoder = connector->encoder;
12860 		struct drm_crtc_state *crtc_state = NULL;
12861 
12862 		if (new_conn_state->crtc != crtc)
12863 			continue;
12864 
12865 		if (crtc)
12866 			crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12867 
12868 		intel_connector_verify_state(crtc_state, new_conn_state);
12869 
12870 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
12871 		     "connector's atomic encoder doesn't match legacy encoder\n");
12872 	}
12873 }
12874 
12875 static void
12876 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
12877 {
12878 	struct intel_encoder *encoder;
12879 	struct drm_connector *connector;
12880 	struct drm_connector_state *old_conn_state, *new_conn_state;
12881 	int i;
12882 
12883 	for_each_intel_encoder(dev, encoder) {
12884 		bool enabled = false, found = false;
12885 		enum pipe pipe;
12886 
12887 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12888 			      encoder->base.base.id,
12889 			      encoder->base.name);
12890 
12891 		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12892 						   new_conn_state, i) {
12893 			if (old_conn_state->best_encoder == &encoder->base)
12894 				found = true;
12895 
12896 			if (new_conn_state->best_encoder != &encoder->base)
12897 				continue;
12898 			found = enabled = true;
12899 
12900 			I915_STATE_WARN(new_conn_state->crtc !=
12901 					encoder->base.crtc,
12902 			     "connector's crtc doesn't match encoder crtc\n");
12903 		}
12904 
12905 		if (!found)
12906 			continue;
12907 
12908 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
12909 		     "encoder's enabled state mismatch "
12910 		     "(expected %i, found %i)\n",
12911 		     !!encoder->base.crtc, enabled);
12912 
12913 		if (!encoder->base.crtc) {
12914 			bool active;
12915 
12916 			active = encoder->get_hw_state(encoder, &pipe);
12917 			I915_STATE_WARN(active,
12918 			     "encoder detached but still enabled on pipe %c.\n",
12919 			     pipe_name(pipe));
12920 		}
12921 	}
12922 }
12923 
12924 static void
12925 verify_crtc_state(struct drm_crtc *crtc,
12926 		  struct drm_crtc_state *old_crtc_state,
12927 		  struct drm_crtc_state *new_crtc_state)
12928 {
12929 	struct drm_device *dev = crtc->dev;
12930 	struct drm_i915_private *dev_priv = to_i915(dev);
12931 	struct intel_encoder *encoder;
12932 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12933 	struct intel_crtc_state *pipe_config, *sw_config;
12934 	struct drm_atomic_state *old_state;
12935 	bool active;
12936 
12937 	old_state = old_crtc_state->state;
12938 	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12939 	pipe_config = to_intel_crtc_state(old_crtc_state);
12940 	memset(pipe_config, 0, sizeof(*pipe_config));
12941 	pipe_config->base.crtc = crtc;
12942 	pipe_config->base.state = old_state;
12943 
12944 	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12945 
12946 	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12947 
12948 	/* we keep both pipes enabled on 830 */
12949 	if (IS_I830(dev_priv))
12950 		active = new_crtc_state->active;
12951 
12952 	I915_STATE_WARN(new_crtc_state->active != active,
12953 	     "crtc active state doesn't match with hw state "
12954 	     "(expected %i, found %i)\n", new_crtc_state->active, active);
12955 
12956 	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12957 	     "transitional active state does not match atomic hw state "
12958 	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12959 
12960 	for_each_encoder_on_crtc(dev, crtc, encoder) {
12961 		enum pipe pipe;
12962 
12963 		active = encoder->get_hw_state(encoder, &pipe);
12964 		I915_STATE_WARN(active != new_crtc_state->active,
12965 			"[ENCODER:%i] active %i with crtc active %i\n",
12966 			encoder->base.base.id, active, new_crtc_state->active);
12967 
12968 		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12969 				"Encoder connected to wrong pipe %c\n",
12970 				pipe_name(pipe));
12971 
12972 		if (active)
12973 			encoder->get_config(encoder, pipe_config);
12974 	}
12975 
12976 	intel_crtc_compute_pixel_rate(pipe_config);
12977 
12978 	if (!new_crtc_state->active)
12979 		return;
12980 
12981 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
12982 
12983 	sw_config = to_intel_crtc_state(new_crtc_state);
12984 	if (!intel_pipe_config_compare(sw_config, pipe_config, false)) {
12985 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
12986 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
12987 		intel_dump_pipe_config(sw_config, NULL, "[sw state]");
12988 	}
12989 }
12990 
12991 static void
12992 intel_verify_planes(struct intel_atomic_state *state)
12993 {
12994 	struct intel_plane *plane;
12995 	const struct intel_plane_state *plane_state;
12996 	int i;
12997 
12998 	for_each_new_intel_plane_in_state(state, plane,
12999 					  plane_state, i)
13000 		assert_plane(plane, plane_state->slave ||
13001 			     plane_state->base.visible);
13002 }
13003 
13004 static void
13005 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13006 			 struct intel_shared_dpll *pll,
13007 			 struct drm_crtc *crtc,
13008 			 struct drm_crtc_state *new_state)
13009 {
13010 	struct intel_dpll_hw_state dpll_hw_state;
13011 	unsigned int crtc_mask;
13012 	bool active;
13013 
13014 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13015 
13016 	DRM_DEBUG_KMS("%s\n", pll->info->name);
13017 
13018 	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
13019 
13020 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
13021 		I915_STATE_WARN(!pll->on && pll->active_mask,
13022 		     "pll in active use but not on in sw tracking\n");
13023 		I915_STATE_WARN(pll->on && !pll->active_mask,
13024 		     "pll is on but not used by any active crtc\n");
13025 		I915_STATE_WARN(pll->on != active,
13026 		     "pll on state mismatch (expected %i, found %i)\n",
13027 		     pll->on, active);
13028 	}
13029 
13030 	if (!crtc) {
13031 		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
13032 				"more active pll users than references: %x vs %x\n",
13033 				pll->active_mask, pll->state.crtc_mask);
13034 
13035 		return;
13036 	}
13037 
13038 	crtc_mask = drm_crtc_mask(crtc);
13039 
13040 	if (new_state->active)
13041 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13042 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13043 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13044 	else
13045 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13046 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13047 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13048 
13049 	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
13050 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13051 			crtc_mask, pll->state.crtc_mask);
13052 
13053 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
13054 					  &dpll_hw_state,
13055 					  sizeof(dpll_hw_state)),
13056 			"pll hw state mismatch\n");
13057 }
13058 
13059 static void
13060 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13061 			 struct drm_crtc_state *old_crtc_state,
13062 			 struct drm_crtc_state *new_crtc_state)
13063 {
13064 	struct drm_i915_private *dev_priv = to_i915(dev);
13065 	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13066 	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13067 
13068 	if (new_state->shared_dpll)
13069 		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13070 
13071 	if (old_state->shared_dpll &&
13072 	    old_state->shared_dpll != new_state->shared_dpll) {
13073 		unsigned int crtc_mask = drm_crtc_mask(crtc);
13074 		struct intel_shared_dpll *pll = old_state->shared_dpll;
13075 
13076 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13077 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
13078 				pipe_name(drm_crtc_index(crtc)));
		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
				pipe_name(drm_crtc_index(crtc)));
13082 	}
13083 }
13084 
13085 static void
13086 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13087 			  struct drm_atomic_state *state,
13088 			  struct drm_crtc_state *old_state,
13089 			  struct drm_crtc_state *new_state)
13090 {
13091 	if (!needs_modeset(new_state) &&
13092 	    !to_intel_crtc_state(new_state)->update_pipe)
13093 		return;
13094 
13095 	verify_wm_state(crtc, new_state);
13096 	verify_connector_state(crtc->dev, state, crtc);
13097 	verify_crtc_state(crtc, old_state, new_state);
13098 	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13099 }
13100 
13101 static void
13102 verify_disabled_dpll_state(struct drm_device *dev)
13103 {
13104 	struct drm_i915_private *dev_priv = to_i915(dev);
13105 	int i;
13106 
13107 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
13108 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13109 }
13110 
13111 static void
13112 intel_modeset_verify_disabled(struct drm_device *dev,
13113 			      struct drm_atomic_state *state)
13114 {
13115 	verify_encoder_state(dev, state);
13116 	verify_connector_state(dev, state, NULL);
13117 	verify_disabled_dpll_state(dev);
13118 }
13119 
13120 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
13121 {
13122 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13123 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13124 
13125 	/*
13126 	 * The scanline counter increments at the leading edge of hsync.
13127 	 *
13128 	 * On most platforms it starts counting from vtotal-1 on the
13129 	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. I.e. just after
13131 	 * start of vblank, which also occurs at start of hsync (on the
13132 	 * last active line), the scanline counter will read vblank_start-1.
13133 	 *
13134 	 * On gen2 the scanline counter starts counting from 1 instead
13135 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13136 	 * to keep the value positive), instead of adding one.
13137 	 *
13138 	 * On HSW+ the behaviour of the scanline counter depends on the output
13139 	 * type. For DP ports it behaves like most other platforms, but on HDMI
13140 	 * there's an extra 1 line difference. So we need to add two instead of
13141 	 * one to the value.
13142 	 *
13143 	 * On VLV/CHV DSI the scanline counter would appear to increment
13144 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13145 	 * that means we can't tell whether we're in vblank or not while
13146 	 * we're on that particular line. We must still set scanline_offset
13147 	 * to 1 so that the vblank timestamps come out correct when we query
13148 	 * the scanline counter from within the vblank interrupt handler.
13149 	 * However if queried just before the start of vblank we'll get an
13150 	 * answer that's slightly in the future.
13151 	 */
13152 	if (IS_GEN(dev_priv, 2)) {
13153 		const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
13154 		int vtotal;
13155 
13156 		vtotal = adjusted_mode->crtc_vtotal;
13157 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13158 			vtotal /= 2;
13159 
13160 		crtc->scanline_offset = vtotal - 1;
13161 	} else if (HAS_DDI(dev_priv) &&
13162 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13163 		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
13166 }
13167 
13168 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13169 {
13170 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13171 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13172 	struct intel_crtc *crtc;
13173 	int i;
13174 
13175 	if (!dev_priv->display.crtc_compute_clock)
13176 		return;
13177 
13178 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13179 					    new_crtc_state, i) {
13180 		struct intel_shared_dpll *old_dpll =
13181 			old_crtc_state->shared_dpll;
13182 
13183 		if (!needs_modeset(&new_crtc_state->base))
13184 			continue;
13185 
13186 		new_crtc_state->shared_dpll = NULL;
13187 
13188 		if (!old_dpll)
13189 			continue;
13190 
13191 		intel_release_shared_dpll(old_dpll, crtc, &state->base);
13192 	}
13193 }
13194 
13195 /*
13196  * This implements the workaround described in the "notes" section of the mode
13197  * set sequence documentation. When going from no pipes or single pipe to
13198  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13199  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13200  */
13201 static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
13202 {
13203 	struct intel_crtc_state *crtc_state;
13204 	struct intel_crtc *crtc;
13205 	struct intel_crtc_state *first_crtc_state = NULL;
13206 	struct intel_crtc_state *other_crtc_state = NULL;
13207 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13208 	int i;
13209 
	/* look at all crtc's that are going to be enabled during the modeset */
13211 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13212 		if (!crtc_state->base.active ||
13213 		    !needs_modeset(&crtc_state->base))
13214 			continue;
13215 
13216 		if (first_crtc_state) {
13217 			other_crtc_state = crtc_state;
13218 			break;
13219 		} else {
13220 			first_crtc_state = crtc_state;
13221 			first_pipe = crtc->pipe;
13222 		}
13223 	}
13224 
13225 	/* No workaround needed? */
13226 	if (!first_crtc_state)
13227 		return 0;
13228 
13229 	/* w/a possibly needed, check how many crtc's are already enabled. */
13230 	for_each_intel_crtc(state->base.dev, crtc) {
13231 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13232 		if (IS_ERR(crtc_state))
13233 			return PTR_ERR(crtc_state);
13234 
13235 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
13236 
13237 		if (!crtc_state->base.active ||
13238 		    needs_modeset(&crtc_state->base))
13239 			continue;
13240 
13241 		/* 2 or more enabled crtcs means no need for w/a */
13242 		if (enabled_pipe != INVALID_PIPE)
13243 			return 0;
13244 
13245 		enabled_pipe = crtc->pipe;
13246 	}
13247 
13248 	if (enabled_pipe != INVALID_PIPE)
13249 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13250 	else if (other_crtc_state)
13251 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13252 
13253 	return 0;
13254 }
13255 
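/*
 * Grab a crtc_state for every pipe: this serializes the commit against
 * anything else touching those pipes, without forcing a modeset on them.
 * Contrast with intel_modeset_all_pipes() below, which additionally forces
 * a full modeset on all active pipes.
 */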
13256 static int intel_lock_all_pipes(struct drm_atomic_state *state)
13257 {
13258 	struct drm_crtc *crtc;
13259 
13260 	/* Add all pipes to the state */
13261 	for_each_crtc(state->dev, crtc) {
13262 		struct drm_crtc_state *crtc_state;
13263 
13264 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13265 		if (IS_ERR(crtc_state))
13266 			return PTR_ERR(crtc_state);
13267 	}
13268 
13269 	return 0;
13270 }
13271 
13272 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13273 {
13274 	struct drm_crtc *crtc;
13275 
13276 	/*
13277 	 * Add all pipes to the state, and force
13278 	 * a modeset on all the active ones.
13279 	 */
13280 	for_each_crtc(state->dev, crtc) {
13281 		struct drm_crtc_state *crtc_state;
13282 		int ret;
13283 
13284 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13285 		if (IS_ERR(crtc_state))
13286 			return PTR_ERR(crtc_state);
13287 
13288 		if (!crtc_state->active || needs_modeset(crtc_state))
13289 			continue;
13290 
13291 		crtc_state->mode_changed = true;
13292 
13293 		ret = drm_atomic_add_affected_connectors(state, crtc);
13294 		if (ret)
13295 			return ret;
13296 
13297 		ret = drm_atomic_add_affected_planes(state, crtc);
13298 		if (ret)
13299 			return ret;
13300 	}
13301 
13302 	return 0;
13303 }
13304 
13305 static int intel_modeset_checks(struct intel_atomic_state *state)
13306 {
13307 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13308 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13309 	struct intel_crtc *crtc;
13310 	int ret = 0, i;
13311 
13312 	if (!check_digital_port_conflicts(state)) {
13313 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13314 		return -EINVAL;
13315 	}
13316 
13317 	/* keep the current setting */
13318 	if (!state->cdclk.force_min_cdclk_changed)
13319 		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
13320 
13321 	state->modeset = true;
13322 	state->active_crtcs = dev_priv->active_crtcs;
13323 	state->cdclk.logical = dev_priv->cdclk.logical;
13324 	state->cdclk.actual = dev_priv->cdclk.actual;
13325 	state->cdclk.pipe = INVALID_PIPE;
13326 
13327 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13328 					    new_crtc_state, i) {
13329 		if (new_crtc_state->base.active)
13330 			state->active_crtcs |= 1 << i;
13331 		else
13332 			state->active_crtcs &= ~(1 << i);
13333 
13334 		if (old_crtc_state->base.active != new_crtc_state->base.active)
13335 			state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
13336 	}
13337 
13338 	/*
13339 	 * See if the config requires any additional preparation, e.g.
13340 	 * to adjust global state with pipes off.  We need to do this
13341 	 * here so we can get the modeset_pipe updated config for the new
13342 	 * mode set on this crtc.  For other crtcs we need to use the
13343 	 * adjusted_mode bits in the crtc directly.
13344 	 */
13345 	if (dev_priv->display.modeset_calc_cdclk) {
13346 		enum pipe pipe;
13347 
13348 		ret = dev_priv->display.modeset_calc_cdclk(state);
13349 		if (ret < 0)
13350 			return ret;
13351 
13352 		/*
13353 		 * Writes to dev_priv->cdclk.logical must protected by
13354 		 * holding all the crtc locks, even if we don't end up
13355 		 * touching the hardware
13356 		 */
13357 		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
13358 					&state->cdclk.logical)) {
13359 			ret = intel_lock_all_pipes(&state->base);
13360 			if (ret < 0)
13361 				return ret;
13362 		}
13363 
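		/*
		 * A cd2x divider update can be applied without a full
		 * modeset, but only with a single active pipe to synchronize
		 * against: when exactly one bit is set in active_crtcs,
		 * ilog2() recovers that pipe.
		 */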
13364 		if (is_power_of_2(state->active_crtcs)) {
13365 			struct drm_crtc *crtc;
13366 			struct drm_crtc_state *crtc_state;
13367 
13368 			pipe = ilog2(state->active_crtcs);
13369 			crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
13370 			crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
13371 			if (crtc_state && needs_modeset(crtc_state))
13372 				pipe = INVALID_PIPE;
13373 		} else {
13374 			pipe = INVALID_PIPE;
13375 		}
13376 
13377 		/* All pipes must be switched off while we change the cdclk. */
13378 		if (pipe != INVALID_PIPE &&
13379 		    intel_cdclk_needs_cd2x_update(dev_priv,
13380 						  &dev_priv->cdclk.actual,
13381 						  &state->cdclk.actual)) {
13382 			ret = intel_lock_all_pipes(&state->base);
13383 			if (ret < 0)
13384 				return ret;
13385 
13386 			state->cdclk.pipe = pipe;
13387 		} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
13388 						     &state->cdclk.actual)) {
13389 			ret = intel_modeset_all_pipes(&state->base);
13390 			if (ret < 0)
13391 				return ret;
13392 
13393 			state->cdclk.pipe = INVALID_PIPE;
13394 		}
13395 
13396 		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
13397 			      state->cdclk.logical.cdclk,
13398 			      state->cdclk.actual.cdclk);
13399 		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
13400 			      state->cdclk.logical.voltage_level,
13401 			      state->cdclk.actual.voltage_level);
13402 	}
13403 
13404 	intel_modeset_clear_plls(state);
13405 
13406 	if (IS_HASWELL(dev_priv))
13407 		return haswell_mode_set_planes_workaround(state);
13408 
13409 	return 0;
13410 }
13411 
13412 /*
13413  * Handle calculation of various watermark data at the end of the atomic check
13414  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13415  * handlers to ensure that all derived state has been updated.
13416  */
13417 static int calc_watermark_data(struct intel_atomic_state *state)
13418 {
13419 	struct drm_device *dev = state->base.dev;
13420 	struct drm_i915_private *dev_priv = to_i915(dev);
13421 
13422 	/* Is there platform-specific watermark information to calculate? */
13423 	if (dev_priv->display.compute_global_watermarks)
13424 		return dev_priv->display.compute_global_watermarks(state);
13425 
13426 	return 0;
13427 }
13428 
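/*
 * Downgrade a requested full modeset to a fastset when the new pipe
 * config is close enough to the current one: clear base.mode_changed,
 * flag update_pipe, and carry over the fuzzy-matched M/N and DRRS state.
 */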
13429 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13430 				     struct intel_crtc_state *new_crtc_state)
13431 {
13432 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
13433 		return;
13434 
13435 	new_crtc_state->base.mode_changed = false;
13436 	new_crtc_state->update_pipe = true;
13437 
13438 	/*
13439 	 * If we're not doing the full modeset we want to
13440 	 * keep the current M/N values as they may be
13441 	 * sufficiently different to the computed values
13442 	 * to cause problems.
13443 	 *
13444 	 * FIXME: should really copy more fuzzy state here
13445 	 */
13446 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
13447 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
13448 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
13449 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
13450 }
13451 
13452 /**
13453  * intel_atomic_check - validate state object
13454  * @dev: drm device
13455  * @_state: state to validate
13456  */
13457 static int intel_atomic_check(struct drm_device *dev,
13458 			      struct drm_atomic_state *_state)
13459 {
13460 	struct drm_i915_private *dev_priv = to_i915(dev);
13461 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
13462 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13463 	struct intel_crtc *crtc;
13464 	int ret, i;
13465 	bool any_ms = state->cdclk.force_min_cdclk_changed;
13466 
13467 	/* Catch I915_MODE_FLAG_INHERITED */
13468 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13469 					    new_crtc_state, i) {
13470 		if (new_crtc_state->base.mode.private_flags !=
13471 		    old_crtc_state->base.mode.private_flags)
13472 			new_crtc_state->base.mode_changed = true;
13473 	}
13474 
13475 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
13476 	if (ret)
13477 		goto fail;
13478 
13479 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13480 					    new_crtc_state, i) {
13481 		if (!needs_modeset(&new_crtc_state->base))
13482 			continue;
13483 
13484 		if (!new_crtc_state->base.enable) {
13485 			any_ms = true;
13486 			continue;
13487 		}
13488 
13489 		ret = intel_modeset_pipe_config(new_crtc_state);
13490 		if (ret)
13491 			goto fail;
13492 
13493 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
13494 
13495 		if (needs_modeset(&new_crtc_state->base))
13496 			any_ms = true;
13497 	}
13498 
13499 	ret = drm_dp_mst_atomic_check(&state->base);
13500 	if (ret)
13501 		goto fail;
13502 
13503 	if (any_ms) {
13504 		ret = intel_modeset_checks(state);
13505 		if (ret)
13506 			goto fail;
13507 	} else {
13508 		state->cdclk.logical = dev_priv->cdclk.logical;
13509 	}
13510 
13511 	ret = icl_add_linked_planes(state);
13512 	if (ret)
13513 		goto fail;
13514 
13515 	ret = drm_atomic_helper_check_planes(dev, &state->base);
13516 	if (ret)
13517 		goto fail;
13518 
13519 	intel_fbc_choose_crtc(dev_priv, state);
13520 	ret = calc_watermark_data(state);
13521 	if (ret)
13522 		goto fail;
13523 
13524 	ret = intel_bw_atomic_check(state);
13525 	if (ret)
13526 		goto fail;
13527 
13528 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13529 					    new_crtc_state, i) {
13530 		if (!needs_modeset(&new_crtc_state->base) &&
13531 		    !new_crtc_state->update_pipe)
13532 			continue;
13533 
13534 		intel_dump_pipe_config(new_crtc_state, state,
13535 				       needs_modeset(&new_crtc_state->base) ?
13536 				       "[modeset]" : "[fastset]");
13537 	}
13538 
13539 	return 0;
13540 
13541  fail:
13542 	if (ret == -EDEADLK)
13543 		return ret;
13544 
13545 	/*
13546 	 * FIXME would probably be nice to know which crtc specifically
13547 	 * caused the failure, in cases where we can pinpoint it.
13548 	 */
13549 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13550 					    new_crtc_state, i)
13551 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
13552 
13553 	return ret;
13554 }
13555 
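/* Pin the framebuffers and set up fencing for all planes in the state. */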
13556 static int intel_atomic_prepare_commit(struct drm_device *dev,
13557 				       struct drm_atomic_state *state)
13558 {
13559 	return drm_atomic_helper_prepare_planes(dev, state);
13560 }
13561 
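/*
 * Platforms without a usable hardware frame counter report a
 * max_vblank_count of zero; use the software counter from
 * drm_crtc_accurate_vblank_count() in that case.
 */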
13562 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13563 {
13564 	struct drm_device *dev = crtc->base.dev;
13565 	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13566 
13567 	if (!vblank->max_vblank_count)
13568 		return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13569 
13570 	return dev->driver->get_vblank_counter(dev, crtc->pipe);
13571 }
13572 
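/*
 * Commit the new state of a single crtc: a full crtc_enable() for a
 * modeset, otherwise a pipe/plane update bracketed by the vblank
 * evasion in intel_begin/finish_crtc_commit().
 */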
13573 static void intel_update_crtc(struct drm_crtc *crtc,
13574 			      struct drm_atomic_state *state,
13575 			      struct drm_crtc_state *old_crtc_state,
13576 			      struct drm_crtc_state *new_crtc_state)
13577 {
13578 	struct drm_device *dev = crtc->dev;
13579 	struct drm_i915_private *dev_priv = to_i915(dev);
13580 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13581 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
13582 	bool modeset = needs_modeset(new_crtc_state);
13583 	struct intel_plane_state *new_plane_state =
13584 		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
13585 						 to_intel_plane(crtc->primary));
13586 
13587 	if (modeset) {
13588 		update_scanline_offset(pipe_config);
13589 		dev_priv->display.crtc_enable(pipe_config, state);
13590 
13591 		/* vblanks work again, re-enable pipe CRC. */
13592 		intel_crtc_enable_pipe_crc(intel_crtc);
13593 	} else {
13594 		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13595 				       pipe_config);
13596 
13597 		if (pipe_config->update_pipe)
13598 			intel_encoders_update_pipe(crtc, pipe_config, state);
13599 	}
13600 
13601 	if (pipe_config->update_pipe && !pipe_config->enable_fbc)
13602 		intel_fbc_disable(intel_crtc);
13603 	else if (new_plane_state)
13604 		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
13605 
13606 	intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13607 
13608 	if (INTEL_GEN(dev_priv) >= 9)
13609 		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13610 	else
13611 		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13612 
13613 	intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
13614 }
13615 
13616 static void intel_update_crtcs(struct drm_atomic_state *state)
13617 {
13618 	struct drm_crtc *crtc;
13619 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13620 	int i;
13621 
13622 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13623 		if (!new_crtc_state->active)
13624 			continue;
13625 
13626 		intel_update_crtc(crtc, state, old_crtc_state,
13627 				  new_crtc_state);
13628 	}
13629 }
13630 
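/*
 * SKL+ commit order: a pipe may only be updated once its new DDB
 * allocation no longer overlaps the allocations still in use by the
 * not-yet-updated pipes, so keep looping over the crtcs and commit
 * whichever ones have become safe to update until all are done.
 */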
13631 static void skl_update_crtcs(struct drm_atomic_state *state)
13632 {
13633 	struct drm_i915_private *dev_priv = to_i915(state->dev);
13634 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13635 	struct drm_crtc *crtc;
13636 	struct intel_crtc *intel_crtc;
13637 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13638 	struct intel_crtc_state *cstate;
13639 	unsigned int updated = 0;
13640 	bool progress;
13641 	enum pipe pipe;
13642 	int i;
13643 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13644 	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
13645 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
13646 
13647 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
13648 		/* Ignore allocations for crtcs that have been turned off. */
13649 		if (new_crtc_state->active)
13650 			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
13651 
13652 	/* If a 2nd DBuf slice is required, enable it here */
13653 	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13654 		icl_dbuf_slices_update(dev_priv, required_slices);
13655 
13656 	/*
13657 	 * Whenever the number of active pipes changes, we need to make sure we
13658 	 * update the pipes in the right order so that their ddb allocations
13659 	 * never overlap with each other in between CRTC updates. Otherwise we'll
13660 	 * cause pipe underruns and other bad stuff.
13661 	 */
13662 	do {
13663 		progress = false;
13664 
13665 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13666 			bool vbl_wait = false;
13667 			unsigned int cmask = drm_crtc_mask(crtc);
13668 
13669 			intel_crtc = to_intel_crtc(crtc);
13670 			cstate = to_intel_crtc_state(new_crtc_state);
13671 			pipe = intel_crtc->pipe;
13672 
13673 			if (updated & cmask || !cstate->base.active)
13674 				continue;
13675 
13676 			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
13677 							entries,
13678 							INTEL_INFO(dev_priv)->num_pipes, i))
13679 				continue;
13680 
13681 			updated |= cmask;
13682 			entries[i] = cstate->wm.skl.ddb;
13683 
13684 			/*
13685 			 * If this is an already active pipe, its DDB has changed,
13686 			 * and this isn't the last pipe that needs updating
13687 			 * then we need to wait for a vblank to pass for the
13688 			 * new ddb allocation to take effect.
13689 			 */
13690 			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
13691 						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
13692 			    !new_crtc_state->active_changed &&
13693 			    intel_state->wm_results.dirty_pipes != updated)
13694 				vbl_wait = true;
13695 
13696 			intel_update_crtc(crtc, state, old_crtc_state,
13697 					  new_crtc_state);
13698 
13699 			if (vbl_wait)
13700 				intel_wait_for_vblank(dev_priv, pipe);
13701 
13702 			progress = true;
13703 		}
13704 	} while (progress);
13705 
13706 	/* If the 2nd DBuf slice is no longer required, disable it */
13707 	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13708 		icl_dbuf_slices_update(dev_priv, required_slices);
13709 }
13710 
13711 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13712 {
13713 	struct intel_atomic_state *state, *next;
13714 	struct llist_node *freed;
13715 
13716 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13717 	llist_for_each_entry_safe(state, next, freed, freed)
13718 		drm_atomic_state_put(&state->base);
13719 }
13720 
13721 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13722 {
13723 	struct drm_i915_private *dev_priv =
13724 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13725 
13726 	intel_atomic_helper_free_state(dev_priv);
13727 }
13728 
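/*
 * Wait for the commit_ready fence, while also waking up for a pending
 * GPU reset so a commit can't get stuck behind a wedged GPU.
 */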
13729 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13730 {
13731 	struct wait_queue_entry wait_fence, wait_reset;
13732 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13733 
13734 	init_wait_entry(&wait_fence, 0);
13735 	init_wait_entry(&wait_reset, 0);
13736 	for (;;) {
13737 		prepare_to_wait(&intel_state->commit_ready.wait,
13738 				&wait_fence, TASK_UNINTERRUPTIBLE);
13739 		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13740 				&wait_reset, TASK_UNINTERRUPTIBLE);
13741 
13743 		if (i915_sw_fence_done(&intel_state->commit_ready)
13744 		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13745 			break;
13746 
13747 		schedule();
13748 	}
13749 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13750 	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13751 }
13752 
13753 static void intel_atomic_cleanup_work(struct work_struct *work)
13754 {
13755 	struct drm_atomic_state *state =
13756 		container_of(work, struct drm_atomic_state, commit_work);
13757 	struct drm_i915_private *i915 = to_i915(state->dev);
13758 
13759 	drm_atomic_helper_cleanup_planes(&i915->drm, state);
13760 	drm_atomic_helper_commit_cleanup_done(state);
13761 	drm_atomic_state_put(state);
13762 
13763 	intel_atomic_helper_free_state(i915);
13764 }
13765 
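/*
 * The actual commit: disable the outgoing crtcs, reprogram cdclk if
 * needed, enable/update the remaining crtcs, wait for the flips to
 * complete and finally hand the cleanup off to a worker.
 */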
13766 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13767 {
13768 	struct drm_device *dev = state->dev;
13769 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13770 	struct drm_i915_private *dev_priv = to_i915(dev);
13771 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13772 	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
13773 	struct drm_crtc *crtc;
13774 	struct intel_crtc *intel_crtc;
13775 	u64 put_domains[I915_MAX_PIPES] = {};
13776 	intel_wakeref_t wakeref = 0;
13777 	int i;
13778 
13779 	intel_atomic_commit_fence_wait(intel_state);
13780 
13781 	drm_atomic_helper_wait_for_dependencies(state);
13782 
13783 	if (intel_state->modeset)
13784 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13785 
13786 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13787 		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13788 		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13789 		intel_crtc = to_intel_crtc(crtc);
13790 
13791 		if (needs_modeset(new_crtc_state) ||
13792 		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
13793 
13794 			put_domains[intel_crtc->pipe] =
13795 				modeset_get_crtc_power_domains(crtc,
13796 					new_intel_crtc_state);
13797 		}
13798 
13799 		if (!needs_modeset(new_crtc_state))
13800 			continue;
13801 
13802 		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
13803 
13804 		if (old_crtc_state->active) {
13805 			intel_crtc_disable_planes(intel_state, intel_crtc);
13806 
13807 			/*
13808 			 * We need to disable pipe CRC before disabling the pipe,
13809 			 * or we race against vblank off.
13810 			 */
13811 			intel_crtc_disable_pipe_crc(intel_crtc);
13812 
13813 			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
13814 			intel_crtc->active = false;
13815 			intel_fbc_disable(intel_crtc);
13816 			intel_disable_shared_dpll(old_intel_crtc_state);
13817 
13818 			/*
13819 			 * Underruns don't always raise
13820 			 * interrupts, so check manually.
13821 			 */
13822 			intel_check_cpu_fifo_underruns(dev_priv);
13823 			intel_check_pch_fifo_underruns(dev_priv);
13824 
13825 			/* FIXME unify this for all platforms */
13826 			if (!new_crtc_state->active &&
13827 			    !HAS_GMCH(dev_priv) &&
13828 			    dev_priv->display.initial_watermarks)
13829 				dev_priv->display.initial_watermarks(intel_state,
13830 								     new_intel_crtc_state);
13831 		}
13832 	}
13833 
13834 	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
13835 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13836 		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
13837 
13838 	if (intel_state->modeset) {
13839 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13840 
13841 		intel_set_cdclk_pre_plane_update(dev_priv,
13842 						 &intel_state->cdclk.actual,
13843 						 &dev_priv->cdclk.actual,
13844 						 intel_state->cdclk.pipe);
13845 
13846 		/*
13847 		 * SKL workaround: bspec recommends we disable the SAGV when we
13848 		 * have more than one pipe enabled
13849 		 */
13850 		if (!intel_can_enable_sagv(state))
13851 			intel_disable_sagv(dev_priv);
13852 
13853 		intel_modeset_verify_disabled(dev, state);
13854 	}
13855 
13856 	/* Complete the events for pipes that have now been disabled */
13857 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13858 		bool modeset = needs_modeset(new_crtc_state);
13859 
13860 		/* Complete events for now disabled pipes here. */
13861 		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13862 			spin_lock_irq(&dev->event_lock);
13863 			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13864 			spin_unlock_irq(&dev->event_lock);
13865 
13866 			new_crtc_state->event = NULL;
13867 		}
13868 	}
13869 
13870 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13871 	dev_priv->display.update_crtcs(state);
13872 
13873 	if (intel_state->modeset)
13874 		intel_set_cdclk_post_plane_update(dev_priv,
13875 						  &intel_state->cdclk.actual,
13876 						  &dev_priv->cdclk.actual,
13877 						  intel_state->cdclk.pipe);
13878 
13879 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13880 	 * already, but still need the state for the delayed optimization. To
13881 	 * fix this:
13882 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13883 	 * - schedule that vblank worker _before_ calling hw_done
13884 	 * - at the start of commit_tail, cancel it synchronously
13885 	 * - switch over to the vblank wait helper in the core after that since
13886 	 *   we don't need our special handling any more.
13887 	 */
13888 	drm_atomic_helper_wait_for_flip_done(dev, state);
13889 
13890 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13891 		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13892 
13893 		if (new_crtc_state->active &&
13894 		    !needs_modeset(new_crtc_state) &&
13895 		    (new_intel_crtc_state->base.color_mgmt_changed ||
13896 		     new_intel_crtc_state->update_pipe))
13897 			intel_color_load_luts(new_intel_crtc_state);
13898 	}
13899 
13900 	/*
13901 	 * Now that the vblank has passed, we can go ahead and program the
13902 	 * optimal watermarks on platforms that need two-step watermark
13903 	 * programming.
13904 	 *
13905 	 * TODO: Move this (and other cleanup) to an async worker eventually.
13906 	 */
13907 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13908 		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13909 
13910 		if (dev_priv->display.optimize_watermarks)
13911 			dev_priv->display.optimize_watermarks(intel_state,
13912 							      new_intel_crtc_state);
13913 	}
13914 
13915 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13916 		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13917 
13918 		if (put_domains[i])
13919 			modeset_put_power_domains(dev_priv, put_domains[i]);
13920 
13921 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13922 	}
13923 
13924 	if (intel_state->modeset)
13925 		intel_verify_planes(intel_state);
13926 
13927 	if (intel_state->modeset && intel_can_enable_sagv(state))
13928 		intel_enable_sagv(dev_priv);
13929 
13930 	drm_atomic_helper_commit_hw_done(state);
13931 
13932 	if (intel_state->modeset) {
13933 		/* As one of the primary mmio accessors, KMS has a high
13934 		 * likelihood of triggering bugs in unclaimed access. After we
13935 		 * finish modesetting, see if an error has been flagged, and if
13936 		 * so enable debugging for the next modeset - and hope we catch
13937 		 * the culprit.
13938 		 */
13939 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
13940 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
13941 	}
13942 	intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
13943 
13944 	/*
13945 	 * Defer the cleanup of the old state to a separate worker so as not
13946 	 * to impede the current task (userspace for blocking modesets) that
13947 	 * is executed inline. For out-of-line asynchronous modesets/flips,
13948 	 * deferring to a new worker seems overkill, but we would place a
13949 	 * schedule point (cond_resched()) here anyway to keep latencies
13950 	 * down.
13951 	 */
13952 	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
13953 	queue_work(system_highpri_wq, &state->commit_work);
13954 }
13955 
13956 static void intel_atomic_commit_work(struct work_struct *work)
13957 {
13958 	struct drm_atomic_state *state =
13959 		container_of(work, struct drm_atomic_state, commit_work);
13960 
13961 	intel_atomic_commit_tail(state);
13962 }
13963 
13964 static int __i915_sw_fence_call
13965 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13966 			  enum i915_sw_fence_notify notify)
13967 {
13968 	struct intel_atomic_state *state =
13969 		container_of(fence, struct intel_atomic_state, commit_ready);
13970 
13971 	switch (notify) {
13972 	case FENCE_COMPLETE:
13973 		/* we do blocking waits in the worker, nothing to do here */
13974 		break;
13975 	case FENCE_FREE:
13976 		{
13977 			struct intel_atomic_helper *helper =
13978 				&to_i915(state->base.dev)->atomic_helper;
13979 
13980 			if (llist_add(&state->freed, &helper->free_list))
13981 				schedule_work(&helper->free_work);
13982 			break;
13983 		}
13984 	}
13985 
13986 	return NOTIFY_DONE;
13987 }
13988 
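/* Move the frontbuffer tracking bits from the old fbs to the new fbs. */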
13989 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13990 {
13991 	struct drm_plane_state *old_plane_state, *new_plane_state;
13992 	struct drm_plane *plane;
13993 	int i;
13994 
13995 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13996 		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13997 				  intel_fb_obj(new_plane_state->fb),
13998 				  to_intel_plane(plane)->frontbuffer_bit);
13999 }
14000 
14001 /**
14002  * intel_atomic_commit - commit validated state object
14003  * @dev: DRM device
14004  * @state: the top-level driver state object
14005  * @nonblock: nonblocking commit
14006  *
14007  * This function commits a top-level state object that has been validated
14008  * with drm_atomic_helper_check().
14009  *
14010  * RETURNS
14011  * Zero for success or -errno.
14012  */
14013 static int intel_atomic_commit(struct drm_device *dev,
14014 			       struct drm_atomic_state *state,
14015 			       bool nonblock)
14016 {
14017 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14018 	struct drm_i915_private *dev_priv = to_i915(dev);
14019 	int ret = 0;
14020 
14021 	intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
14022 
14023 	drm_atomic_state_get(state);
14024 	i915_sw_fence_init(&intel_state->commit_ready,
14025 			   intel_atomic_commit_ready);
14026 
14027 	/*
14028 	 * The intel_legacy_cursor_update() fast path takes care
14029 	 * of avoiding the vblank waits for simple cursor
14030 	 * movement and flips. For cursor on/off and size changes,
14031 	 * we want to perform the vblank waits so that watermark
14032 	 * updates happen during the correct frames. Gen9+ have
14033 	 * double buffered watermarks and so shouldn't need this.
14034 	 *
14035 	 * Unset state->legacy_cursor_update before the call to
14036 	 * drm_atomic_helper_setup_commit() because otherwise
14037 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
14038 	 * we get FIFO underruns because we didn't wait
14039 	 * for vblank.
14040 	 *
14041 	 * FIXME doing watermarks and fb cleanup from a vblank worker
14042 	 * (assuming we had any) would solve these problems.
14043 	 */
14044 	if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
14045 		struct intel_crtc_state *new_crtc_state;
14046 		struct intel_crtc *crtc;
14047 		int i;
14048 
14049 		for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
14050 			if (new_crtc_state->wm.need_postvbl_update ||
14051 			    new_crtc_state->update_wm_post)
14052 				state->legacy_cursor_update = false;
14053 	}
14054 
14055 	ret = intel_atomic_prepare_commit(dev, state);
14056 	if (ret) {
14057 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
14058 		i915_sw_fence_commit(&intel_state->commit_ready);
14059 		intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
14060 		return ret;
14061 	}
14062 
14063 	ret = drm_atomic_helper_setup_commit(state, nonblock);
14064 	if (!ret)
14065 		ret = drm_atomic_helper_swap_state(state, true);
14066 
14067 	if (ret) {
14068 		i915_sw_fence_commit(&intel_state->commit_ready);
14069 
14070 		drm_atomic_helper_cleanup_planes(dev, state);
14071 		intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
14072 		return ret;
14073 	}
14074 	dev_priv->wm.distrust_bios_wm = false;
14075 	intel_shared_dpll_swap_state(state);
14076 	intel_atomic_track_fbs(state);
14077 
14078 	if (intel_state->modeset) {
14079 		memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
14080 		       sizeof(intel_state->min_cdclk));
14081 		memcpy(dev_priv->min_voltage_level,
14082 		       intel_state->min_voltage_level,
14083 		       sizeof(intel_state->min_voltage_level));
14084 		dev_priv->active_crtcs = intel_state->active_crtcs;
14085 		dev_priv->cdclk.force_min_cdclk =
14086 			intel_state->cdclk.force_min_cdclk;
14087 
14088 		intel_cdclk_swap_state(intel_state);
14089 	}
14090 
14091 	drm_atomic_state_get(state);
14092 	INIT_WORK(&state->commit_work, intel_atomic_commit_work);
14093 
14094 	i915_sw_fence_commit(&intel_state->commit_ready);
14095 	if (nonblock && intel_state->modeset) {
14096 		queue_work(dev_priv->modeset_wq, &state->commit_work);
14097 	} else if (nonblock) {
14098 		queue_work(system_unbound_wq, &state->commit_work);
14099 	} else {
14100 		if (intel_state->modeset)
14101 			flush_workqueue(dev_priv->modeset_wq);
14102 		intel_atomic_commit_tail(state);
14103 	}
14104 
14105 	return 0;
14106 }
14107 
14108 static const struct drm_crtc_funcs intel_crtc_funcs = {
14109 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
14110 	.set_config = drm_atomic_helper_set_config,
14111 	.destroy = intel_crtc_destroy,
14112 	.page_flip = drm_atomic_helper_page_flip,
14113 	.atomic_duplicate_state = intel_crtc_duplicate_state,
14114 	.atomic_destroy_state = intel_crtc_destroy_state,
14115 	.set_crc_source = intel_crtc_set_crc_source,
14116 	.verify_crc_source = intel_crtc_verify_crc_source,
14117 	.get_crc_sources = intel_crtc_get_crc_sources,
14118 };
14119 
14120 struct wait_rps_boost {
14121 	struct wait_queue_entry wait;
14122 
14123 	struct drm_crtc *crtc;
14124 	struct i915_request *request;
14125 };
14126 
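/*
 * Vblank waitqueue callback: boost the GPU frequency if the request
 * behind the flip hasn't started running yet, then drop the request
 * and vblank references taken in add_rps_boost_after_vblank().
 */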
14127 static int do_rps_boost(struct wait_queue_entry *_wait,
14128 			unsigned mode, int sync, void *key)
14129 {
14130 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14131 	struct i915_request *rq = wait->request;
14132 
14133 	/*
14134 	 * If we missed the vblank, but the request is already running it
14135 	 * is reasonable to assume that it will complete before the next
14136 	 * vblank without our intervention, so leave RPS alone.
14137 	 */
14138 	if (!i915_request_started(rq))
14139 		gen6_rps_boost(rq);
14140 	i915_request_put(rq);
14141 
14142 	drm_crtc_vblank_put(wait->crtc);
14143 
14144 	list_del(&wait->wait.entry);
14145 	kfree(wait);
14146 	return 1;
14147 }
14148 
14149 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14150 				       struct dma_fence *fence)
14151 {
14152 	struct wait_rps_boost *wait;
14153 
14154 	if (!dma_fence_is_i915(fence))
14155 		return;
14156 
14157 	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14158 		return;
14159 
14160 	if (drm_crtc_vblank_get(crtc))
14161 		return;
14162 
14163 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14164 	if (!wait) {
14165 		drm_crtc_vblank_put(crtc);
14166 		return;
14167 	}
14168 
14169 	wait->request = to_request(dma_fence_get(fence));
14170 	wait->crtc = crtc;
14171 
14172 	wait->wait.func = do_rps_boost;
14173 	wait->wait.flags = 0;
14174 
14175 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14176 }
14177 
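/*
 * Pin (and fence, when appropriate) the fb backing object for scanout;
 * cursors on platforms with cursor_needs_physical are first attached
 * to a physically contiguous allocation.
 */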
14178 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14179 {
14180 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14181 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14182 	struct drm_framebuffer *fb = plane_state->base.fb;
14183 	struct i915_vma *vma;
14184 
14185 	if (plane->id == PLANE_CURSOR &&
14186 	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
14187 		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14188 		const int align = intel_cursor_alignment(dev_priv);
14189 		int err;
14190 
14191 		err = i915_gem_object_attach_phys(obj, align);
14192 		if (err)
14193 			return err;
14194 	}
14195 
14196 	vma = intel_pin_and_fence_fb_obj(fb,
14197 					 &plane_state->view,
14198 					 intel_plane_uses_fence(plane_state),
14199 					 &plane_state->flags);
14200 	if (IS_ERR(vma))
14201 		return PTR_ERR(vma);
14202 
14203 	plane_state->vma = vma;
14204 
14205 	return 0;
14206 }
14207 
14208 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14209 {
14210 	struct i915_vma *vma;
14211 
14212 	vma = fetch_and_zero(&old_plane_state->vma);
14213 	if (vma)
14214 		intel_unpin_fb_vma(vma, old_plane_state->flags);
14215 }
14216 
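/*
 * Bump any outstanding rendering to the fb object up to
 * I915_PRIORITY_DISPLAY so a pending flip isn't stuck behind
 * lower priority work.
 */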
14217 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
14218 {
14219 	struct i915_sched_attr attr = {
14220 		.priority = I915_PRIORITY_DISPLAY,
14221 	};
14222 
14223 	i915_gem_object_wait_priority(obj, 0, &attr);
14224 }
14225 
14226 /**
14227  * intel_prepare_plane_fb - Prepare fb for usage on plane
14228  * @plane: drm plane to prepare for
14229  * @new_state: the plane state being prepared
14230  *
14231  * Prepares a framebuffer for usage on a display plane.  Generally this
14232  * involves pinning the underlying object and updating the frontbuffer tracking
14233  * bits.  Some older platforms need special physical address handling for
14234  * cursor planes.
14235  *
14236  * Must be called with struct_mutex held.
14237  *
14238  * Returns 0 on success, negative error code on failure.
14239  */
14240 int
14241 intel_prepare_plane_fb(struct drm_plane *plane,
14242 		       struct drm_plane_state *new_state)
14243 {
14244 	struct intel_atomic_state *intel_state =
14245 		to_intel_atomic_state(new_state->state);
14246 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
14247 	struct drm_framebuffer *fb = new_state->fb;
14248 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14249 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14250 	int ret;
14251 
14252 	if (old_obj) {
14253 		struct drm_crtc_state *crtc_state =
14254 			drm_atomic_get_new_crtc_state(new_state->state,
14255 						      plane->state->crtc);
14256 
14257 		/* Big Hammer, we also need to ensure that any pending
14258 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14259 		 * current scanout is retired before unpinning the old
14260 		 * framebuffer. Note that we rely on userspace rendering
14261 		 * into the buffer attached to the pipe they are waiting
14262 		 * on. If not, userspace generates a GPU hang with IPEHR
14263 		 * pointing to the MI_WAIT_FOR_EVENT.
14264 		 *
14265 		 * This should only fail upon a hung GPU, in which case we
14266 		 * can safely continue.
14267 		 */
14268 		if (needs_modeset(crtc_state)) {
14269 			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14270 							      old_obj->base.resv, NULL,
14271 							      false, 0,
14272 							      GFP_KERNEL);
14273 			if (ret < 0)
14274 				return ret;
14275 		}
14276 	}
14277 
14278 	if (new_state->fence) { /* explicit fencing */
14279 		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14280 						    new_state->fence,
14281 						    I915_FENCE_TIMEOUT,
14282 						    GFP_KERNEL);
14283 		if (ret < 0)
14284 			return ret;
14285 	}
14286 
14287 	if (!obj)
14288 		return 0;
14289 
14290 	ret = i915_gem_object_pin_pages(obj);
14291 	if (ret)
14292 		return ret;
14293 
14294 	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14295 	if (ret) {
14296 		i915_gem_object_unpin_pages(obj);
14297 		return ret;
14298 	}
14299 
14300 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14301 
14302 	mutex_unlock(&dev_priv->drm.struct_mutex);
14303 	i915_gem_object_unpin_pages(obj);
14304 	if (ret)
14305 		return ret;
14306 
14307 	fb_obj_bump_render_priority(obj);
14308 	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14309 
14310 	if (!new_state->fence) { /* implicit fencing */
14311 		struct dma_fence *fence;
14312 
14313 		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14314 						      obj->base.resv, NULL,
14315 						      false, I915_FENCE_TIMEOUT,
14316 						      GFP_KERNEL);
14317 		if (ret < 0)
14318 			return ret;
14319 
14320 		fence = reservation_object_get_excl_rcu(obj->base.resv);
14321 		if (fence) {
14322 			add_rps_boost_after_vblank(new_state->crtc, fence);
14323 			dma_fence_put(fence);
14324 		}
14325 	} else {
14326 		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14327 	}
14328 
14329 	/*
14330 	 * We declare pageflips to be interactive and so merit a small bias
14331 	 * towards upclocking to deliver the frame on time. By only changing
14332 	 * the RPS thresholds to sample more regularly and aim for higher
14333 	 * clocks we can hopefully deliver low power workloads (like kodi)
14334 	 * that are not quite steady state without resorting to forcing
14335 	 * maximum clocks following a vblank miss (see do_rps_boost()).
14336 	 */
14337 	if (!intel_state->rps_interactive) {
14338 		intel_rps_mark_interactive(dev_priv, true);
14339 		intel_state->rps_interactive = true;
14340 	}
14341 
14342 	return 0;
14343 }
14344 
14345 /**
14346  * intel_cleanup_plane_fb - Cleans up an fb after plane use
14347  * @plane: drm plane to clean up for
14348  * @old_state: the state from the previous modeset
14349  *
14350  * Cleans up a framebuffer that has just been removed from a plane.
14351  *
14352  * Must be called with struct_mutex held.
14353  */
14354 void
14355 intel_cleanup_plane_fb(struct drm_plane *plane,
14356 		       struct drm_plane_state *old_state)
14357 {
14358 	struct intel_atomic_state *intel_state =
14359 		to_intel_atomic_state(old_state->state);
14360 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
14361 
14362 	if (intel_state->rps_interactive) {
14363 		intel_rps_mark_interactive(dev_priv, false);
14364 		intel_state->rps_interactive = false;
14365 	}
14366 
14367 	/* Should only be called after a successful intel_prepare_plane_fb()! */
14368 	mutex_lock(&dev_priv->drm.struct_mutex);
14369 	intel_plane_unpin_fb(to_intel_plane_state(old_state));
14370 	mutex_unlock(&dev_priv->drm.struct_mutex);
14371 }
14372 
14373 int
14374 skl_max_scale(const struct intel_crtc_state *crtc_state,
14375 	      u32 pixel_format)
14376 {
14377 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14378 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14379 	int max_scale, mult;
14380 	int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14381 
14382 	if (!crtc_state->base.enable)
14383 		return DRM_PLANE_HELPER_NO_SCALING;
14384 
14385 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14386 	max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14387 
14388 	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14389 		max_dotclk *= 2;
14390 
14391 	if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14392 		return DRM_PLANE_HELPER_NO_SCALING;
14393 
14394 	/*
14395 	 * skl max scale is lower of:
14396 	 *    close to 3 but not 3, -1 is for that purpose
14397 	 *            or
14398 	 *    cdclk/crtc_clock
14399 	 */
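	/*
	 * E.g. max_dotclk = 96000 and crtc_clock = 48000 gives
	 * tmpclk2 = 256 * ((96000 << 8) / 48000) = 0x20000, i.e. 2.0
	 * in .16 fixed point, so the cdclk based limit wins over the
	 * 0x2ffff (mult == 3) cap.
	 */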
14400 	mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14401 	tmpclk1 = (1 << 16) * mult - 1;
14402 	tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14403 	max_scale = min(tmpclk1, tmpclk2);
14404 
14405 	return max_scale;
14406 }
14407 
14408 static void intel_begin_crtc_commit(struct intel_atomic_state *state,
14409 				    struct intel_crtc *crtc)
14410 {
14411 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14412 	struct intel_crtc_state *old_crtc_state =
14413 		intel_atomic_get_old_crtc_state(state, crtc);
14414 	struct intel_crtc_state *new_crtc_state =
14415 		intel_atomic_get_new_crtc_state(state, crtc);
14416 	bool modeset = needs_modeset(&new_crtc_state->base);
14417 
14418 	/* Perform vblank evasion around commit operation */
14419 	intel_pipe_update_start(new_crtc_state);
14420 
14421 	if (modeset)
14422 		goto out;
14423 
14424 	if (new_crtc_state->base.color_mgmt_changed ||
14425 	    new_crtc_state->update_pipe)
14426 		intel_color_commit(new_crtc_state);
14427 
14428 	if (new_crtc_state->update_pipe)
14429 		intel_update_pipe_config(old_crtc_state, new_crtc_state);
14430 	else if (INTEL_GEN(dev_priv) >= 9)
14431 		skl_detach_scalers(new_crtc_state);
14432 
14433 	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14434 		bdw_set_pipemisc(new_crtc_state);
14435 
14436 out:
14437 	if (dev_priv->display.atomic_update_watermarks)
14438 		dev_priv->display.atomic_update_watermarks(state,
14439 							   new_crtc_state);
14440 }
14441 
14442 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14443 				  struct intel_crtc_state *crtc_state)
14444 {
14445 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14446 
14447 	if (!IS_GEN(dev_priv, 2))
14448 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14449 
14450 	if (crtc_state->has_pch_encoder) {
14451 		enum pipe pch_transcoder =
14452 			intel_crtc_pch_transcoder(crtc);
14453 
14454 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14455 	}
14456 }
14457 
14458 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14459 				     struct intel_crtc *crtc)
14460 {
14461 	struct intel_crtc_state *old_crtc_state =
14462 		intel_atomic_get_old_crtc_state(state, crtc);
14463 	struct intel_crtc_state *new_crtc_state =
14464 		intel_atomic_get_new_crtc_state(state, crtc);
14465 
14466 	intel_pipe_update_end(new_crtc_state);
14467 
14468 	if (new_crtc_state->update_pipe &&
14469 	    !needs_modeset(&new_crtc_state->base) &&
14470 	    old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14471 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14472 }
14473 
14474 /**
14475  * intel_plane_destroy - destroy a plane
14476  * @plane: plane to destroy
14477  *
14478  * Common destruction function for all types of planes (primary, cursor,
14479  * sprite).
14480  */
14481 void intel_plane_destroy(struct drm_plane *plane)
14482 {
14483 	drm_plane_cleanup(plane);
14484 	kfree(to_intel_plane(plane));
14485 }
14486 
14487 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14488 					    u32 format, u64 modifier)
14489 {
14490 	switch (modifier) {
14491 	case DRM_FORMAT_MOD_LINEAR:
14492 	case I915_FORMAT_MOD_X_TILED:
14493 		break;
14494 	default:
14495 		return false;
14496 	}
14497 
14498 	switch (format) {
14499 	case DRM_FORMAT_C8:
14500 	case DRM_FORMAT_RGB565:
14501 	case DRM_FORMAT_XRGB1555:
14502 	case DRM_FORMAT_XRGB8888:
14503 		return modifier == DRM_FORMAT_MOD_LINEAR ||
14504 			modifier == I915_FORMAT_MOD_X_TILED;
14505 	default:
14506 		return false;
14507 	}
14508 }
14509 
14510 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14511 					    u32 format, u64 modifier)
14512 {
14513 	switch (modifier) {
14514 	case DRM_FORMAT_MOD_LINEAR:
14515 	case I915_FORMAT_MOD_X_TILED:
14516 		break;
14517 	default:
14518 		return false;
14519 	}
14520 
14521 	switch (format) {
14522 	case DRM_FORMAT_C8:
14523 	case DRM_FORMAT_RGB565:
14524 	case DRM_FORMAT_XRGB8888:
14525 	case DRM_FORMAT_XBGR8888:
14526 	case DRM_FORMAT_XRGB2101010:
14527 	case DRM_FORMAT_XBGR2101010:
14528 		return modifier == DRM_FORMAT_MOD_LINEAR ||
14529 			modifier == I915_FORMAT_MOD_X_TILED;
14530 	default:
14531 		return false;
14532 	}
14533 }
14534 
14535 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14536 					      u32 format, u64 modifier)
14537 {
14538 	return modifier == DRM_FORMAT_MOD_LINEAR &&
14539 		format == DRM_FORMAT_ARGB8888;
14540 }
14541 
14542 static const struct drm_plane_funcs i965_plane_funcs = {
14543 	.update_plane = drm_atomic_helper_update_plane,
14544 	.disable_plane = drm_atomic_helper_disable_plane,
14545 	.destroy = intel_plane_destroy,
14546 	.atomic_duplicate_state = intel_plane_duplicate_state,
14547 	.atomic_destroy_state = intel_plane_destroy_state,
14548 	.format_mod_supported = i965_plane_format_mod_supported,
14549 };
14550 
14551 static const struct drm_plane_funcs i8xx_plane_funcs = {
14552 	.update_plane = drm_atomic_helper_update_plane,
14553 	.disable_plane = drm_atomic_helper_disable_plane,
14554 	.destroy = intel_plane_destroy,
14555 	.atomic_duplicate_state = intel_plane_duplicate_state,
14556 	.atomic_destroy_state = intel_plane_destroy_state,
14557 	.format_mod_supported = i8xx_plane_format_mod_supported,
14558 };
14559 
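/*
 * Fastpath for legacy cursor ioctls: update the plane in place without
 * a full atomic commit, falling back to the slowpath whenever the
 * update could affect watermarks or a commit is still in flight.
 */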
14560 static int
14561 intel_legacy_cursor_update(struct drm_plane *plane,
14562 			   struct drm_crtc *crtc,
14563 			   struct drm_framebuffer *fb,
14564 			   int crtc_x, int crtc_y,
14565 			   unsigned int crtc_w, unsigned int crtc_h,
14566 			   u32 src_x, u32 src_y,
14567 			   u32 src_w, u32 src_h,
14568 			   struct drm_modeset_acquire_ctx *ctx)
14569 {
14570 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14571 	int ret;
14572 	struct drm_plane_state *old_plane_state, *new_plane_state;
14573 	struct intel_plane *intel_plane = to_intel_plane(plane);
14574 	struct drm_framebuffer *old_fb;
14575 	struct intel_crtc_state *crtc_state =
14576 		to_intel_crtc_state(crtc->state);
14577 	struct intel_crtc_state *new_crtc_state;
14578 
14579 	/*
14580 	 * When the crtc is inactive or there is a modeset pending,
14581 	 * wait for it to complete in the slowpath.
14582 	 */
14583 	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14584 	    crtc_state->update_pipe)
14585 		goto slow;
14586 
14587 	old_plane_state = plane->state;
14588 	/*
14589 	 * Don't do an async update if there is an outstanding commit modifying
14590 	 * the plane.  This prevents our async update's changes from getting
14591 	 * overridden by a previous synchronous update's state.
14592 	 */
14593 	if (old_plane_state->commit &&
14594 	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
14595 		goto slow;
14596 
14597 	/*
14598 	 * If any parameters change that may affect watermarks,
14599 	 * take the slowpath. Only changing fb or position should be
14600 	 * in the fastpath.
14601 	 */
14602 	if (old_plane_state->crtc != crtc ||
14603 	    old_plane_state->src_w != src_w ||
14604 	    old_plane_state->src_h != src_h ||
14605 	    old_plane_state->crtc_w != crtc_w ||
14606 	    old_plane_state->crtc_h != crtc_h ||
14607 	    !old_plane_state->fb != !fb)
14608 		goto slow;
14609 
14610 	new_plane_state = intel_plane_duplicate_state(plane);
14611 	if (!new_plane_state)
14612 		return -ENOMEM;
14613 
14614 	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14615 	if (!new_crtc_state) {
14616 		ret = -ENOMEM;
14617 		goto out_free;
14618 	}
14619 
14620 	drm_atomic_set_fb_for_plane(new_plane_state, fb);
14621 
14622 	new_plane_state->src_x = src_x;
14623 	new_plane_state->src_y = src_y;
14624 	new_plane_state->src_w = src_w;
14625 	new_plane_state->src_h = src_h;
14626 	new_plane_state->crtc_x = crtc_x;
14627 	new_plane_state->crtc_y = crtc_y;
14628 	new_plane_state->crtc_w = crtc_w;
14629 	new_plane_state->crtc_h = crtc_h;
14630 
14631 	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14632 						  to_intel_plane_state(old_plane_state),
14633 						  to_intel_plane_state(new_plane_state));
14634 	if (ret)
14635 		goto out_free;
14636 
14637 	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14638 	if (ret)
14639 		goto out_free;
14640 
14641 	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14642 	if (ret)
14643 		goto out_unlock;
14644 
14645 	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
14646 
14647 	old_fb = old_plane_state->fb;
14648 	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14649 			  intel_plane->frontbuffer_bit);
14650 
14651 	/* Swap plane state */
14652 	plane->state = new_plane_state;
14653 
14654 	/*
14655 	 * We cannot swap crtc_state as it may be in use by an atomic commit or
14656 	 * page flip that's running simultaneously. If we swap crtc_state and
14657 	 * destroy the old state, we will cause a use-after-free there.
14658 	 *
14659 	 * Only update active_planes, which is needed for our internal
14660 	 * bookkeeping. Either value will do the right thing when updating
14661 	 * planes atomically. If the cursor was part of the atomic update then
14662 	 * we would have taken the slowpath.
14663 	 */
14664 	crtc_state->active_planes = new_crtc_state->active_planes;
14665 
14666 	if (plane->state->visible)
14667 		intel_update_plane(intel_plane, crtc_state,
14668 				   to_intel_plane_state(plane->state));
14669 	else
14670 		intel_disable_plane(intel_plane, crtc_state);
14671 
14672 	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
14673 
14674 out_unlock:
14675 	mutex_unlock(&dev_priv->drm.struct_mutex);
14676 out_free:
14677 	if (new_crtc_state)
14678 		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
14679 	if (ret)
14680 		intel_plane_destroy_state(plane, new_plane_state);
14681 	else
14682 		intel_plane_destroy_state(plane, old_plane_state);
14683 	return ret;
14684 
14685 slow:
14686 	return drm_atomic_helper_update_plane(plane, crtc, fb,
14687 					      crtc_x, crtc_y, crtc_w, crtc_h,
14688 					      src_x, src_y, src_w, src_h, ctx);
14689 }
14690 
14691 static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14692 	.update_plane = intel_legacy_cursor_update,
14693 	.disable_plane = drm_atomic_helper_disable_plane,
14694 	.destroy = intel_plane_destroy,
14695 	.atomic_duplicate_state = intel_plane_duplicate_state,
14696 	.atomic_destroy_state = intel_plane_destroy_state,
14697 	.format_mod_supported = intel_cursor_format_mod_supported,
14698 };
14699 
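/*
 * FBC is tied to a fixed set of primary planes; which ones varies by
 * platform generation.
 */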
14700 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14701 			       enum i9xx_plane_id i9xx_plane)
14702 {
14703 	if (!HAS_FBC(dev_priv))
14704 		return false;
14705 
14706 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14707 		return i9xx_plane == PLANE_A; /* tied to pipe A */
14708 	else if (IS_IVYBRIDGE(dev_priv))
14709 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14710 			i9xx_plane == PLANE_C;
14711 	else if (INTEL_GEN(dev_priv) >= 4)
14712 		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14713 	else
14714 		return i9xx_plane == PLANE_A;
14715 }
14716 
14717 static struct intel_plane *
14718 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14719 {
14720 	struct intel_plane *plane;
14721 	const struct drm_plane_funcs *plane_funcs;
14722 	unsigned int supported_rotations;
14723 	unsigned int possible_crtcs;
14724 	const u64 *modifiers;
14725 	const u32 *formats;
14726 	int num_formats;
14727 	int ret;
14728 
14729 	if (INTEL_GEN(dev_priv) >= 9)
14730 		return skl_universal_plane_create(dev_priv, pipe,
14731 						  PLANE_PRIMARY);
14732 
14733 	plane = intel_plane_alloc();
14734 	if (IS_ERR(plane))
14735 		return plane;
14736 
14737 	plane->pipe = pipe;
14738 	/*
14739 	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14740 	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
14741 	 */
14742 	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14743 		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14744 	else
14745 		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14746 	plane->id = PLANE_PRIMARY;
14747 	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14748 
14749 	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14750 	if (plane->has_fbc) {
14751 		struct intel_fbc *fbc = &dev_priv->fbc;
14752 
14753 		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14754 	}
14755 
14756 	if (INTEL_GEN(dev_priv) >= 4) {
14757 		formats = i965_primary_formats;
14758 		num_formats = ARRAY_SIZE(i965_primary_formats);
14759 		modifiers = i9xx_format_modifiers;
14760 
14761 		plane->max_stride = i9xx_plane_max_stride;
14762 		plane->update_plane = i9xx_update_plane;
14763 		plane->disable_plane = i9xx_disable_plane;
14764 		plane->get_hw_state = i9xx_plane_get_hw_state;
14765 		plane->check_plane = i9xx_plane_check;
14766 
14767 		plane_funcs = &i965_plane_funcs;
14768 	} else {
14769 		formats = i8xx_primary_formats;
14770 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
14771 		modifiers = i9xx_format_modifiers;
14772 
14773 		plane->max_stride = i9xx_plane_max_stride;
14774 		plane->update_plane = i9xx_update_plane;
14775 		plane->disable_plane = i9xx_disable_plane;
14776 		plane->get_hw_state = i9xx_plane_get_hw_state;
14777 		plane->check_plane = i9xx_plane_check;
14778 
14779 		plane_funcs = &i8xx_plane_funcs;
14780 	}
14781 
14782 	possible_crtcs = BIT(pipe);
14783 
14784 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14785 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14786 					       possible_crtcs, plane_funcs,
14787 					       formats, num_formats, modifiers,
14788 					       DRM_PLANE_TYPE_PRIMARY,
14789 					       "primary %c", pipe_name(pipe));
14790 	else
14791 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14792 					       possible_crtcs, plane_funcs,
14793 					       formats, num_formats, modifiers,
14794 					       DRM_PLANE_TYPE_PRIMARY,
14795 					       "plane %c",
14796 					       plane_name(plane->i9xx_plane));
14797 	if (ret)
14798 		goto fail;
14799 
14800 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14801 		supported_rotations =
14802 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14803 			DRM_MODE_REFLECT_X;
14804 	} else if (INTEL_GEN(dev_priv) >= 4) {
14805 		supported_rotations =
14806 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14807 	} else {
14808 		supported_rotations = DRM_MODE_ROTATE_0;
14809 	}
14810 
14811 	if (INTEL_GEN(dev_priv) >= 4)
14812 		drm_plane_create_rotation_property(&plane->base,
14813 						   DRM_MODE_ROTATE_0,
14814 						   supported_rotations);
14815 
14816 	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14817 
14818 	return plane;
14819 
14820 fail:
14821 	intel_plane_free(plane);
14822 
14823 	return ERR_PTR(ret);
14824 }
14825 
14826 static struct intel_plane *
14827 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14828 			  enum pipe pipe)
14829 {
14830 	unsigned int possible_crtcs;
14831 	struct intel_plane *cursor;
14832 	int ret;
14833 
14834 	cursor = intel_plane_alloc();
14835 	if (IS_ERR(cursor))
14836 		return cursor;
14837 
14838 	cursor->pipe = pipe;
14839 	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14840 	cursor->id = PLANE_CURSOR;
14841 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14842 
14843 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14844 		cursor->max_stride = i845_cursor_max_stride;
14845 		cursor->update_plane = i845_update_cursor;
14846 		cursor->disable_plane = i845_disable_cursor;
14847 		cursor->get_hw_state = i845_cursor_get_hw_state;
14848 		cursor->check_plane = i845_check_cursor;
14849 	} else {
14850 		cursor->max_stride = i9xx_cursor_max_stride;
14851 		cursor->update_plane = i9xx_update_cursor;
14852 		cursor->disable_plane = i9xx_disable_cursor;
14853 		cursor->get_hw_state = i9xx_cursor_get_hw_state;
14854 		cursor->check_plane = i9xx_check_cursor;
14855 	}
14856 
14857 	cursor->cursor.base = ~0;
14858 	cursor->cursor.cntl = ~0;
14859 
14860 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14861 		cursor->cursor.size = ~0;
14862 
14863 	possible_crtcs = BIT(pipe);
14864 
14865 	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14866 				       possible_crtcs, &intel_cursor_plane_funcs,
14867 				       intel_cursor_formats,
14868 				       ARRAY_SIZE(intel_cursor_formats),
14869 				       cursor_format_modifiers,
14870 				       DRM_PLANE_TYPE_CURSOR,
14871 				       "cursor %c", pipe_name(pipe));
14872 	if (ret)
14873 		goto fail;
14874 
14875 	if (INTEL_GEN(dev_priv) >= 4)
14876 		drm_plane_create_rotation_property(&cursor->base,
14877 						   DRM_MODE_ROTATE_0,
14878 						   DRM_MODE_ROTATE_0 |
14879 						   DRM_MODE_ROTATE_180);
14880 
14881 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14882 
14883 	return cursor;
14884 
14885 fail:
14886 	intel_plane_free(cursor);
14887 
14888 	return ERR_PTR(ret);
14889 }
14890 
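/* Mark all shared scalers of this crtc as unused. */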
14891 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14892 				    struct intel_crtc_state *crtc_state)
14893 {
14894 	struct intel_crtc_scaler_state *scaler_state =
14895 		&crtc_state->scaler_state;
14896 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14897 	int i;
14898 
14899 	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14900 	if (!crtc->num_scalers)
14901 		return;
14902 
14903 	for (i = 0; i < crtc->num_scalers; i++) {
14904 		struct intel_scaler *scaler = &scaler_state->scalers[i];
14905 
14906 		scaler->in_use = 0;
14907 		scaler->mode = 0;
14908 	}
14909 
14910 	scaler_state->scaler_id = -1;
14911 }
14912 
14913 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
14914 {
14915 	struct intel_crtc *intel_crtc;
14916 	struct intel_crtc_state *crtc_state = NULL;
14917 	struct intel_plane *primary = NULL;
14918 	struct intel_plane *cursor = NULL;
14919 	int sprite, ret;
14920 
14921 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14922 	if (!intel_crtc)
14923 		return -ENOMEM;
14924 
14925 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14926 	if (!crtc_state) {
14927 		ret = -ENOMEM;
14928 		goto fail;
14929 	}
14930 	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
14931 	intel_crtc->config = crtc_state;
14932 
14933 	primary = intel_primary_plane_create(dev_priv, pipe);
14934 	if (IS_ERR(primary)) {
14935 		ret = PTR_ERR(primary);
14936 		goto fail;
14937 	}
14938 	intel_crtc->plane_ids_mask |= BIT(primary->id);
14939 
14940 	for_each_sprite(dev_priv, pipe, sprite) {
14941 		struct intel_plane *plane;
14942 
14943 		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
14944 		if (IS_ERR(plane)) {
14945 			ret = PTR_ERR(plane);
14946 			goto fail;
14947 		}
14948 		intel_crtc->plane_ids_mask |= BIT(plane->id);
14949 	}
14950 
14951 	cursor = intel_cursor_plane_create(dev_priv, pipe);
14952 	if (IS_ERR(cursor)) {
14953 		ret = PTR_ERR(cursor);
14954 		goto fail;
14955 	}
14956 	intel_crtc->plane_ids_mask |= BIT(cursor->id);
14957 
14958 	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
14959 					&primary->base, &cursor->base,
14960 					&intel_crtc_funcs,
14961 					"pipe %c", pipe_name(pipe));
14962 	if (ret)
14963 		goto fail;
14964 
14965 	intel_crtc->pipe = pipe;
14966 
14967 	/* initialize shared scalers */
14968 	intel_crtc_init_scalers(intel_crtc, crtc_state);
14969 
14970 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14971 	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14972 	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14973 
14974 	if (INTEL_GEN(dev_priv) < 9) {
14975 		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14976 
14977 		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14978 		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14979 		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14980 	}
14981 
14982 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14983 
14984 	intel_color_init(intel_crtc);
14985 
14986 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14987 
14988 	return 0;
14989 
14990 fail:
14991 	/*
14992 	 * drm_mode_config_cleanup() will free up any
14993 	 * crtcs/planes already initialized.
14994 	 */
14995 	kfree(crtc_state);
14996 	kfree(intel_crtc);
14997 
14998 	return ret;
14999 }
15000 
15001 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15002 				      struct drm_file *file)
15003 {
15004 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15005 	struct drm_crtc *drmmode_crtc;
15006 	struct intel_crtc *crtc;
15007 
15008 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15009 	if (!drmmode_crtc)
15010 		return -ENOENT;
15011 
15012 	crtc = to_intel_crtc(drmmode_crtc);
15013 	pipe_from_crtc_id->pipe = crtc->pipe;
15014 
15015 	return 0;
15016 }
15017 
15018 static int intel_encoder_clones(struct intel_encoder *encoder)
15019 {
15020 	struct drm_device *dev = encoder->base.dev;
15021 	struct intel_encoder *source_encoder;
15022 	int index_mask = 0;
15023 	int entry = 0;
15024 
15025 	for_each_intel_encoder(dev, source_encoder) {
15026 		if (encoders_cloneable(encoder, source_encoder))
15027 			index_mask |= (1 << entry);
15028 
15029 		entry++;
15030 	}
15031 
15032 	return index_mask;
15033 }
15034 
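/*
 * eDP on port A is only present on mobile parts, is indicated by the
 * DP_DETECTED strap and can be fused off on gen5 via FUSE_STRAP.
 */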
15035 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15036 {
15037 	if (!IS_MOBILE(dev_priv))
15038 		return false;
15039 
15040 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15041 		return false;
15042 
15043 	if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15044 		return false;
15045 
15046 	return true;
15047 }
15048 
15049 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
15050 {
15051 	if (INTEL_GEN(dev_priv) >= 9)
15052 		return false;
15053 
15054 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15055 		return false;
15056 
15057 	if (HAS_PCH_LPT_H(dev_priv) &&
15058 	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15059 		return false;
15060 
15061 	/* DDI E can't be used if DDI A requires 4 lanes */
15062 	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15063 		return false;
15064 
15065 	if (!dev_priv->vbt.int_crt_support)
15066 		return false;
15067 
15068 	return true;
15069 }
15070 
15071 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15072 {
15073 	int pps_num;
15074 	int pps_idx;
15075 
15076 	if (HAS_DDI(dev_priv))
15077 		return;
15078 	/*
15079 	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere that registers can be write protected.
15081 	 */
15082 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15083 		pps_num = 2;
15084 	else
15085 		pps_num = 1;
15086 
15087 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15088 		u32 val = I915_READ(PP_CONTROL(pps_idx));
15089 
15090 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15091 		I915_WRITE(PP_CONTROL(pps_idx), val);
15092 	}
15093 }
15094 
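/*
 * Pick the PPS (panel power sequencer) MMIO base for this platform and
 * apply the register unlock workaround so later PPS writes stick.
 */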
15095 static void intel_pps_init(struct drm_i915_private *dev_priv)
15096 {
15097 	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
15098 		dev_priv->pps_mmio_base = PCH_PPS_BASE;
15099 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15100 		dev_priv->pps_mmio_base = VLV_PPS_BASE;
15101 	else
15102 		dev_priv->pps_mmio_base = PPS_BASE;
15103 
15104 	intel_pps_unlock_regs_wa(dev_priv);
15105 }
15106 
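/*
 * Probe for and register all display outputs (DDI, DP, HDMI, SDVO,
 * LVDS, CRT, DSI, TV, DVO) appropriate for this platform, based on
 * straps, fuses and the VBT.
 */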
15107 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
15108 {
15109 	struct intel_encoder *encoder;
15110 	bool dpd_is_edp = false;
15111 
15112 	intel_pps_init(dev_priv);
15113 
15114 	if (!HAS_DISPLAY(dev_priv))
15115 		return;
15116 
15117 	if (IS_ELKHARTLAKE(dev_priv)) {
15118 		intel_ddi_init(dev_priv, PORT_A);
15119 		intel_ddi_init(dev_priv, PORT_B);
15120 		intel_ddi_init(dev_priv, PORT_C);
15121 		icl_dsi_init(dev_priv);
15122 	} else if (INTEL_GEN(dev_priv) >= 11) {
15123 		intel_ddi_init(dev_priv, PORT_A);
15124 		intel_ddi_init(dev_priv, PORT_B);
15125 		intel_ddi_init(dev_priv, PORT_C);
15126 		intel_ddi_init(dev_priv, PORT_D);
15127 		intel_ddi_init(dev_priv, PORT_E);
15128 		/*
15129 		 * On some ICL SKUs port F is not present. No strap bits for
15130 		 * this, so rely on VBT.
15131 		 * Work around broken VBTs on SKUs known to have no port F.
15132 		 */
15133 		if (IS_ICL_WITH_PORT_F(dev_priv) &&
15134 		    intel_bios_is_port_present(dev_priv, PORT_F))
15135 			intel_ddi_init(dev_priv, PORT_F);
15136 
15137 		icl_dsi_init(dev_priv);
15138 	} else if (IS_GEN9_LP(dev_priv)) {
15139 		/*
15140 		 * FIXME: Broxton doesn't support port detection via the
15141 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
15142 		 * detect the ports.
15143 		 */
15144 		intel_ddi_init(dev_priv, PORT_A);
15145 		intel_ddi_init(dev_priv, PORT_B);
15146 		intel_ddi_init(dev_priv, PORT_C);
15147 
15148 		vlv_dsi_init(dev_priv);
15149 	} else if (HAS_DDI(dev_priv)) {
15150 		int found;
15151 
15152 		if (intel_ddi_crt_present(dev_priv))
15153 			intel_crt_init(dev_priv);
15154 
15155 		/*
15156 		 * Haswell uses DDI functions to detect digital outputs.
15157 		 * On SKL pre-D0 the strap isn't connected, so we assume
15158 		 * it's there.
15159 		 */
15160 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
15161 		/* WaIgnoreDDIAStrap: skl */
15162 		if (found || IS_GEN9_BC(dev_priv))
15163 			intel_ddi_init(dev_priv, PORT_A);
15164 
		/*
		 * DDI B, C, D, and F detection is indicated by the
		 * SFUSE_STRAP register.
		 */
15167 		found = I915_READ(SFUSE_STRAP);
15168 
15169 		if (found & SFUSE_STRAP_DDIB_DETECTED)
15170 			intel_ddi_init(dev_priv, PORT_B);
15171 		if (found & SFUSE_STRAP_DDIC_DETECTED)
15172 			intel_ddi_init(dev_priv, PORT_C);
15173 		if (found & SFUSE_STRAP_DDID_DETECTED)
15174 			intel_ddi_init(dev_priv, PORT_D);
15175 		if (found & SFUSE_STRAP_DDIF_DETECTED)
15176 			intel_ddi_init(dev_priv, PORT_F);
15177 		/*
15178 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
15179 		 */
15180 		if (IS_GEN9_BC(dev_priv) &&
15181 		    intel_bios_is_port_present(dev_priv, PORT_E))
15182 			intel_ddi_init(dev_priv, PORT_E);
15183 
15184 	} else if (HAS_PCH_SPLIT(dev_priv)) {
15185 		int found;
15186 
15187 		/*
15188 		 * intel_edp_init_connector() depends on this completing first,
15189 		 * to prevent the registration of both eDP and LVDS and the
15190 		 * incorrect sharing of the PPS.
15191 		 */
15192 		intel_lvds_init(dev_priv);
15193 		intel_crt_init(dev_priv);
15194 
15195 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
15196 
15197 		if (ilk_has_edp_a(dev_priv))
15198 			intel_dp_init(dev_priv, DP_A, PORT_A);
15199 
15200 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
15201 			/* PCH SDVOB multiplex with HDMIB */
15202 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
15203 			if (!found)
15204 				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
15205 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
15206 				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
15207 		}
15208 
15209 		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
15210 			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
15211 
15212 		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
15213 			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
15214 
15215 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
15216 			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
15217 
15218 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
15219 			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
15220 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15221 		bool has_edp, has_port;
15222 
15223 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
15224 			intel_crt_init(dev_priv);
15225 
15226 		/*
15227 		 * The DP_DETECTED bit is the latched state of the DDC
15228 		 * SDA pin at boot. However since eDP doesn't require DDC
15229 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
15230 		 * eDP ports may have been muxed to an alternate function.
15231 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
15232 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
15233 		 * detect eDP ports.
15234 		 *
15235 		 * Sadly the straps seem to be missing sometimes even for HDMI
15236 		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
15237 		 * and VBT for the presence of the port. Additionally we can't
15238 		 * trust the port type the VBT declares as we've seen at least
15239 		 * HDMI ports that the VBT claim are DP or eDP.
15240 		 */
15241 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
15242 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
15243 		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
15244 			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
15245 		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
15246 			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
15247 
15248 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
15249 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
15250 		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
15251 			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
15252 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
15253 			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
15254 
15255 		if (IS_CHERRYVIEW(dev_priv)) {
15256 			/*
15257 			 * eDP not supported on port D,
15258 			 * so no need to worry about it
15259 			 */
15260 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
15261 			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
15262 				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
15263 			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
15264 				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
15265 		}
15266 
15267 		vlv_dsi_init(dev_priv);
15268 	} else if (IS_PINEVIEW(dev_priv)) {
15269 		intel_lvds_init(dev_priv);
15270 		intel_crt_init(dev_priv);
15271 	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
15272 		bool found = false;
15273 
15274 		if (IS_MOBILE(dev_priv))
15275 			intel_lvds_init(dev_priv);
15276 
15277 		intel_crt_init(dev_priv);
15278 
15279 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15280 			DRM_DEBUG_KMS("probing SDVOB\n");
15281 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
15282 			if (!found && IS_G4X(dev_priv)) {
15283 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
15284 				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
15285 			}
15286 
15287 			if (!found && IS_G4X(dev_priv))
15288 				intel_dp_init(dev_priv, DP_B, PORT_B);
15289 		}
15290 
		/* Before G4X, SDVOC doesn't have its own detect register */
15292 
15293 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
15294 			DRM_DEBUG_KMS("probing SDVOC\n");
15295 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
15296 		}
15297 
		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
15300 			if (IS_G4X(dev_priv)) {
15301 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
15302 				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
15303 			}
15304 			if (IS_G4X(dev_priv))
15305 				intel_dp_init(dev_priv, DP_C, PORT_C);
15306 		}
15307 
15308 		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
15309 			intel_dp_init(dev_priv, DP_D, PORT_D);
15310 
15311 		if (SUPPORTS_TV(dev_priv))
15312 			intel_tv_init(dev_priv);
15313 	} else if (IS_GEN(dev_priv, 2)) {
15314 		if (IS_I85X(dev_priv))
15315 			intel_lvds_init(dev_priv);
15316 
15317 		intel_crt_init(dev_priv);
15318 		intel_dvo_init(dev_priv);
15319 	}
15320 
15321 	intel_psr_init(dev_priv);
15322 
15323 	for_each_intel_encoder(&dev_priv->drm, encoder) {
15324 		encoder->base.possible_crtcs = encoder->crtc_mask;
15325 		encoder->base.possible_clones =
15326 			intel_encoder_clones(encoder);
15327 	}
15328 
15329 	intel_init_pch_refclk(dev_priv);
15330 
15331 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
15332 }
15333 
15334 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15335 {
15336 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15337 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15338 
15339 	drm_framebuffer_cleanup(fb);
15340 
15341 	i915_gem_object_lock(obj);
15342 	WARN_ON(!obj->framebuffer_references--);
15343 	i915_gem_object_unlock(obj);
15344 
15345 	i915_gem_object_put(obj);
15346 
15347 	kfree(intel_fb);
15348 }
15349 
15350 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15351 						struct drm_file *file,
15352 						unsigned int *handle)
15353 {
15354 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15355 
15356 	if (obj->userptr.mm) {
15357 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15358 		return -EINVAL;
15359 	}
15360 
15361 	return drm_gem_handle_create(file, &obj->base, handle);
15362 }
15363 
15364 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15365 					struct drm_file *file,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
15369 {
15370 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15371 
15372 	i915_gem_object_flush_if_display(obj);
15373 	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15374 
15375 	return 0;
15376 }
15377 
15378 static const struct drm_framebuffer_funcs intel_fb_funcs = {
15379 	.destroy = intel_user_framebuffer_destroy,
15380 	.create_handle = intel_user_framebuffer_create_handle,
15381 	.dirty = intel_user_framebuffer_dirty,
15382 };
15383 
15384 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15385 				  struct drm_i915_gem_object *obj,
15386 				  struct drm_mode_fb_cmd2 *mode_cmd)
15387 {
15388 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
15389 	struct drm_framebuffer *fb = &intel_fb->base;
15390 	u32 max_stride;
15391 	unsigned int tiling, stride;
15392 	int ret = -EINVAL;
15393 	int i;
15394 
15395 	i915_gem_object_lock(obj);
15396 	obj->framebuffer_references++;
15397 	tiling = i915_gem_object_get_tiling(obj);
15398 	stride = i915_gem_object_get_stride(obj);
15399 	i915_gem_object_unlock(obj);
15400 
15401 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15402 		/*
15403 		 * If there's a fence, enforce that
15404 		 * the fb modifier and tiling mode match.
15405 		 */
15406 		if (tiling != I915_TILING_NONE &&
15407 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15408 			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
15409 			goto err;
15410 		}
15411 	} else {
15412 		if (tiling == I915_TILING_X) {
15413 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15414 		} else if (tiling == I915_TILING_Y) {
15415 			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
15416 			goto err;
15417 		}
15418 	}
15419 
15420 	if (!drm_any_plane_has_format(&dev_priv->drm,
15421 				      mode_cmd->pixel_format,
15422 				      mode_cmd->modifier[0])) {
15423 		struct drm_format_name_buf format_name;
15424 
15425 		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15426 			      drm_get_format_name(mode_cmd->pixel_format,
15427 						  &format_name),
15428 			      mode_cmd->modifier[0]);
15429 		goto err;
15430 	}
15431 
15432 	/*
15433 	 * gen2/3 display engine uses the fence if present,
15434 	 * so the tiling mode must match the fb modifier exactly.
15435 	 */
15436 	if (INTEL_GEN(dev_priv) < 4 &&
15437 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
15438 		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
15439 		goto err;
15440 	}
15441 
15442 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
15443 					 mode_cmd->modifier[0]);
15444 	if (mode_cmd->pitches[0] > max_stride) {
15445 		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
15446 			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
15447 			      "tiled" : "linear",
15448 			      mode_cmd->pitches[0], max_stride);
15449 		goto err;
15450 	}
15451 
15452 	/*
15453 	 * If there's a fence, enforce that
15454 	 * the fb pitch and fence stride match.
15455 	 */
15456 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15457 		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15458 			      mode_cmd->pitches[0], stride);
15459 		goto err;
15460 	}
15461 
15462 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15463 	if (mode_cmd->offsets[0] != 0)
15464 		goto err;
15465 
15466 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
15467 
15468 	for (i = 0; i < fb->format->num_planes; i++) {
15469 		u32 stride_alignment;
15470 
15471 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15472 			DRM_DEBUG_KMS("bad plane %d handle\n", i);
15473 			goto err;
15474 		}
15475 
15476 		stride_alignment = intel_fb_stride_alignment(fb, i);
15477 
15478 		/*
15479 		 * Display WA #0531: skl,bxt,kbl,glk
15480 		 *
15481 		 * Render decompression and plane width > 3840
15482 		 * combined with horizontal panning requires the
15483 		 * plane stride to be a multiple of 4. We'll just
15484 		 * require the entire fb to accommodate that to avoid
15485 		 * potential runtime errors at plane configuration time.
15486 		 */
15487 		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
15488 		    is_ccs_modifier(fb->modifier))
15489 			stride_alignment *= 4;
15490 
15491 		if (fb->pitches[i] & (stride_alignment - 1)) {
15492 			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15493 				      i, fb->pitches[i], stride_alignment);
15494 			goto err;
15495 		}
15496 
15497 		fb->obj[i] = &obj->base;
15498 	}
15499 
15500 	ret = intel_fill_fb_info(dev_priv, fb);
15501 	if (ret)
15502 		goto err;
15503 
15504 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
15505 	if (ret) {
15506 		DRM_ERROR("framebuffer init failed %d\n", ret);
15507 		goto err;
15508 	}
15509 
15510 	return 0;
15511 
15512 err:
15513 	i915_gem_object_lock(obj);
15514 	obj->framebuffer_references--;
15515 	i915_gem_object_unlock(obj);
15516 	return ret;
15517 }
15518 
15519 static struct drm_framebuffer *
15520 intel_user_framebuffer_create(struct drm_device *dev,
15521 			      struct drm_file *filp,
15522 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
15523 {
15524 	struct drm_framebuffer *fb;
15525 	struct drm_i915_gem_object *obj;
15526 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15527 
15528 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15529 	if (!obj)
15530 		return ERR_PTR(-ENOENT);
15531 
15532 	fb = intel_framebuffer_create(obj, &mode_cmd);
15533 	if (IS_ERR(fb))
15534 		i915_gem_object_put(obj);
15535 
15536 	return fb;
15537 }
15538 
15539 static void intel_atomic_state_free(struct drm_atomic_state *state)
15540 {
15541 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15542 
15543 	drm_atomic_state_default_release(state);
15544 
15545 	i915_sw_fence_fini(&intel_state->commit_ready);
15546 
15547 	kfree(state);
15548 }
15549 
15550 static enum drm_mode_status
15551 intel_mode_valid(struct drm_device *dev,
15552 		 const struct drm_display_mode *mode)
15553 {
15554 	struct drm_i915_private *dev_priv = to_i915(dev);
15555 	int hdisplay_max, htotal_max;
15556 	int vdisplay_max, vtotal_max;
15557 
15558 	/*
15559 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15560 	 * of DBLSCAN modes to the output's mode list when they detect
15561 	 * the scaling mode property on the connector. And they don't
15562 	 * ask the kernel to validate those modes in any way until
15563 	 * modeset time at which point the client gets a protocol error.
15564 	 * So in order to not upset those clients we silently ignore the
15565 	 * DBLSCAN flag on such connectors. For other connectors we will
15566 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
15567 	 * And we always reject DBLSCAN modes in connector->mode_valid()
15568 	 * as we never want such modes on the connector's mode list.
15569 	 */
15570 
15571 	if (mode->vscan > 1)
15572 		return MODE_NO_VSCAN;
15573 
15574 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
15575 		return MODE_H_ILLEGAL;
15576 
15577 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15578 			   DRM_MODE_FLAG_NCSYNC |
15579 			   DRM_MODE_FLAG_PCSYNC))
15580 		return MODE_HSYNC;
15581 
15582 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
15583 			   DRM_MODE_FLAG_PIXMUX |
15584 			   DRM_MODE_FLAG_CLKDIV2))
15585 		return MODE_BAD;
15586 
15587 	if (INTEL_GEN(dev_priv) >= 9 ||
15588 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15589 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15590 		vdisplay_max = 4096;
15591 		htotal_max = 8192;
15592 		vtotal_max = 8192;
15593 	} else if (INTEL_GEN(dev_priv) >= 3) {
15594 		hdisplay_max = 4096;
15595 		vdisplay_max = 4096;
15596 		htotal_max = 8192;
15597 		vtotal_max = 8192;
15598 	} else {
15599 		hdisplay_max = 2048;
15600 		vdisplay_max = 2048;
15601 		htotal_max = 4096;
15602 		vtotal_max = 4096;
15603 	}
15604 
15605 	if (mode->hdisplay > hdisplay_max ||
15606 	    mode->hsync_start > htotal_max ||
15607 	    mode->hsync_end > htotal_max ||
15608 	    mode->htotal > htotal_max)
15609 		return MODE_H_ILLEGAL;
15610 
15611 	if (mode->vdisplay > vdisplay_max ||
15612 	    mode->vsync_start > vtotal_max ||
15613 	    mode->vsync_end > vtotal_max ||
15614 	    mode->vtotal > vtotal_max)
15615 		return MODE_V_ILLEGAL;
15616 
15617 	return MODE_OK;
15618 }
15619 
15620 static const struct drm_mode_config_funcs intel_mode_funcs = {
15621 	.fb_create = intel_user_framebuffer_create,
15622 	.get_format_info = intel_get_format_info,
15623 	.output_poll_changed = intel_fbdev_output_poll_changed,
15624 	.mode_valid = intel_mode_valid,
15625 	.atomic_check = intel_atomic_check,
15626 	.atomic_commit = intel_atomic_commit,
15627 	.atomic_state_alloc = intel_atomic_state_alloc,
15628 	.atomic_state_clear = intel_atomic_state_clear,
15629 	.atomic_state_free = intel_atomic_state_free,
15630 };
15631 
15632 /**
15633  * intel_init_display_hooks - initialize the display modesetting hooks
15634  * @dev_priv: device private
15635  */
15636 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15637 {
15638 	intel_init_cdclk_hooks(dev_priv);
15639 
15640 	if (INTEL_GEN(dev_priv) >= 9) {
15641 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15642 		dev_priv->display.get_initial_plane_config =
15643 			skylake_get_initial_plane_config;
15644 		dev_priv->display.crtc_compute_clock =
15645 			haswell_crtc_compute_clock;
15646 		dev_priv->display.crtc_enable = haswell_crtc_enable;
15647 		dev_priv->display.crtc_disable = haswell_crtc_disable;
15648 	} else if (HAS_DDI(dev_priv)) {
15649 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15650 		dev_priv->display.get_initial_plane_config =
15651 			i9xx_get_initial_plane_config;
15652 		dev_priv->display.crtc_compute_clock =
15653 			haswell_crtc_compute_clock;
15654 		dev_priv->display.crtc_enable = haswell_crtc_enable;
15655 		dev_priv->display.crtc_disable = haswell_crtc_disable;
15656 	} else if (HAS_PCH_SPLIT(dev_priv)) {
15657 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15658 		dev_priv->display.get_initial_plane_config =
15659 			i9xx_get_initial_plane_config;
15660 		dev_priv->display.crtc_compute_clock =
15661 			ironlake_crtc_compute_clock;
15662 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
15663 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
15664 	} else if (IS_CHERRYVIEW(dev_priv)) {
15665 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15666 		dev_priv->display.get_initial_plane_config =
15667 			i9xx_get_initial_plane_config;
15668 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15669 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
15670 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15671 	} else if (IS_VALLEYVIEW(dev_priv)) {
15672 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15673 		dev_priv->display.get_initial_plane_config =
15674 			i9xx_get_initial_plane_config;
15675 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15676 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
15677 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15678 	} else if (IS_G4X(dev_priv)) {
15679 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15680 		dev_priv->display.get_initial_plane_config =
15681 			i9xx_get_initial_plane_config;
15682 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15683 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15684 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15685 	} else if (IS_PINEVIEW(dev_priv)) {
15686 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15687 		dev_priv->display.get_initial_plane_config =
15688 			i9xx_get_initial_plane_config;
15689 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15690 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15691 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15692 	} else if (!IS_GEN(dev_priv, 2)) {
15693 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15694 		dev_priv->display.get_initial_plane_config =
15695 			i9xx_get_initial_plane_config;
15696 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15697 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15698 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15699 	} else {
15700 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15701 		dev_priv->display.get_initial_plane_config =
15702 			i9xx_get_initial_plane_config;
15703 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15704 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15705 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15706 	}
15707 
15708 	if (IS_GEN(dev_priv, 5)) {
15709 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15710 	} else if (IS_GEN(dev_priv, 6)) {
15711 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15712 	} else if (IS_IVYBRIDGE(dev_priv)) {
15713 		/* FIXME: detect B0+ stepping and use auto training */
15714 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15715 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15716 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15717 	}
15718 
15719 	if (INTEL_GEN(dev_priv) >= 9)
15720 		dev_priv->display.update_crtcs = skl_update_crtcs;
15721 	else
15722 		dev_priv->display.update_crtcs = intel_update_crtcs;
15723 }
15724 
15725 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
15726 {
15727 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15728 		return VLV_VGACNTRL;
15729 	else if (INTEL_GEN(dev_priv) >= 5)
15730 		return CPU_VGACNTRL;
15731 	else
15732 		return VGACNTRL;
15733 }
15734 
15735 /* Disable the VGA plane that we never use */
15736 static void i915_disable_vga(struct drm_i915_private *dev_priv)
15737 {
15738 	struct pci_dev *pdev = dev_priv->drm.pdev;
15739 	u8 sr1;
15740 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15741 
15742 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15743 	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
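	/* Set the "screen off" bit (bit 5) in VGA sequencer register SR01 */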
15744 	outb(SR01, VGA_SR_INDEX);
15745 	sr1 = inb(VGA_SR_DATA);
15746 	outb(sr1 | 1<<5, VGA_SR_DATA);
15747 	vga_put(pdev, VGA_RSRC_LEGACY_IO);
15748 	udelay(300);
15749 
15750 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15751 	POSTING_READ(vga_reg);
15752 }
15753 
15754 void intel_modeset_init_hw(struct drm_device *dev)
15755 {
15756 	struct drm_i915_private *dev_priv = to_i915(dev);
15757 
15758 	intel_update_cdclk(dev_priv);
15759 	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15760 	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15761 }
15762 
15763 /*
15764  * Calculate what we think the watermarks should be for the state we've read
15765  * out of the hardware and then immediately program those watermarks so that
15766  * we ensure the hardware settings match our internal state.
15767  *
15768  * We can calculate what we think WM's should be by creating a duplicate of the
15769  * current state (which was constructed during hardware readout) and running it
15770  * through the atomic check code to calculate new watermark values in the
15771  * state object.
15772  */
15773 static void sanitize_watermarks(struct drm_device *dev)
15774 {
15775 	struct drm_i915_private *dev_priv = to_i915(dev);
15776 	struct drm_atomic_state *state;
15777 	struct intel_atomic_state *intel_state;
15778 	struct drm_crtc *crtc;
15779 	struct drm_crtc_state *cstate;
15780 	struct drm_modeset_acquire_ctx ctx;
15781 	int ret;
15782 	int i;
15783 
15784 	/* Only supported on platforms that use atomic watermark design */
15785 	if (!dev_priv->display.optimize_watermarks)
15786 		return;
15787 
15788 	/*
15789 	 * We need to hold connection_mutex before calling duplicate_state so
15790 	 * that the connector loop is protected.
15791 	 */
15792 	drm_modeset_acquire_init(&ctx, 0);
15793 retry:
15794 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
15795 	if (ret == -EDEADLK) {
15796 		drm_modeset_backoff(&ctx);
15797 		goto retry;
15798 	} else if (WARN_ON(ret)) {
15799 		goto fail;
15800 	}
15801 
15802 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
15803 	if (WARN_ON(IS_ERR(state)))
15804 		goto fail;
15805 
15806 	intel_state = to_intel_atomic_state(state);
15807 
15808 	/*
15809 	 * Hardware readout is the only time we don't want to calculate
15810 	 * intermediate watermarks (since we don't trust the current
15811 	 * watermarks).
15812 	 */
15813 	if (!HAS_GMCH(dev_priv))
15814 		intel_state->skip_intermediate_wm = true;
15815 
15816 	ret = intel_atomic_check(dev, state);
15817 	if (ret) {
15818 		/*
15819 		 * If we fail here, it means that the hardware appears to be
15820 		 * programmed in a way that shouldn't be possible, given our
15821 		 * understanding of watermark requirements.  This might mean a
15822 		 * mistake in the hardware readout code or a mistake in the
15823 		 * watermark calculations for a given platform.  Raise a WARN
15824 		 * so that this is noticeable.
15825 		 *
15826 		 * If this actually happens, we'll have to just leave the
15827 		 * BIOS-programmed watermarks untouched and hope for the best.
15828 		 */
15829 		WARN(true, "Could not determine valid watermarks for inherited state\n");
15830 		goto put_state;
15831 	}
15832 
15833 	/* Write calculated watermark values back */
15834 	for_each_new_crtc_in_state(state, crtc, cstate, i) {
15835 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15836 
15837 		cs->wm.need_postvbl_update = true;
15838 		dev_priv->display.optimize_watermarks(intel_state, cs);
15839 
15840 		to_intel_crtc_state(crtc->state)->wm = cs->wm;
15841 	}
15842 
15843 put_state:
15844 	drm_atomic_state_put(state);
15845 fail:
15846 	drm_modeset_drop_locks(&ctx);
15847 	drm_modeset_acquire_fini(&ctx);
15848 }
15849 
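/*
 * Cache the FDI PLL frequency (in kHz) used for FDI bandwidth checks:
 * read back from FDI_PLL_BIOS_0 on ILK, hardcoded to 270 MHz on
 * SNB/IVB; left untouched on other platforms.
 */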
15850 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15851 {
15852 	if (IS_GEN(dev_priv, 5)) {
15853 		u32 fdi_pll_clk =
15854 			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15855 
15856 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15857 	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15858 		dev_priv->fdi_pll_freq = 270000;
15859 	} else {
15860 		return;
15861 	}
15862 
15863 	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15864 }
15865 
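/*
 * Commit the state that was just read out of the hardware back as-is,
 * so that all active planes go through a full state computation before
 * the first real modeset from userspace.
 */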
15866 static int intel_initial_commit(struct drm_device *dev)
15867 {
15868 	struct drm_atomic_state *state = NULL;
15869 	struct drm_modeset_acquire_ctx ctx;
15870 	struct drm_crtc *crtc;
15871 	struct drm_crtc_state *crtc_state;
15872 	int ret = 0;
15873 
15874 	state = drm_atomic_state_alloc(dev);
15875 	if (!state)
15876 		return -ENOMEM;
15877 
15878 	drm_modeset_acquire_init(&ctx, 0);
15879 
15880 retry:
15881 	state->acquire_ctx = &ctx;
15882 
15883 	drm_for_each_crtc(crtc, dev) {
15884 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
15885 		if (IS_ERR(crtc_state)) {
15886 			ret = PTR_ERR(crtc_state);
15887 			goto out;
15888 		}
15889 
15890 		if (crtc_state->active) {
15891 			ret = drm_atomic_add_affected_planes(state, crtc);
15892 			if (ret)
15893 				goto out;
15894 
15895 			/*
15896 			 * FIXME hack to force a LUT update to avoid the
15897 			 * plane update forcing the pipe gamma on without
15898 			 * having a proper LUT loaded. Remove once we
15899 			 * have readout for pipe gamma enable.
15900 			 */
15901 			crtc_state->color_mgmt_changed = true;
15902 		}
15903 	}
15904 
15905 	ret = drm_atomic_commit(state);
15906 
15907 out:
15908 	if (ret == -EDEADLK) {
15909 		drm_atomic_state_clear(state);
15910 		drm_modeset_backoff(&ctx);
15911 		goto retry;
15912 	}
15913 
15914 	drm_atomic_state_put(state);
15915 
15916 	drm_modeset_drop_locks(&ctx);
15917 	drm_modeset_acquire_fini(&ctx);
15918 
15919 	return ret;
15920 }
15921 
15922 int intel_modeset_init(struct drm_device *dev)
15923 {
15924 	struct drm_i915_private *dev_priv = to_i915(dev);
15925 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
15926 	enum pipe pipe;
15927 	struct intel_crtc *crtc;
15928 	int ret;
15929 
15930 	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15931 
15932 	drm_mode_config_init(dev);
15933 
15934 	ret = intel_bw_init(dev_priv);
15935 	if (ret)
15936 		return ret;
15937 
15938 	dev->mode_config.min_width = 0;
15939 	dev->mode_config.min_height = 0;
15940 
15941 	dev->mode_config.preferred_depth = 24;
15942 	dev->mode_config.prefer_shadow = 1;
15943 
15944 	dev->mode_config.allow_fb_modifiers = true;
15945 
15946 	dev->mode_config.funcs = &intel_mode_funcs;
15947 
15948 	init_llist_head(&dev_priv->atomic_helper.free_list);
15949 	INIT_WORK(&dev_priv->atomic_helper.free_work,
15950 		  intel_atomic_helper_free_state_worker);
15951 
15952 	intel_init_quirks(dev_priv);
15953 
15954 	intel_fbc_init(dev_priv);
15955 
15956 	intel_init_pm(dev_priv);
15957 
15958 	/*
15959 	 * There may be no VBT; and if the BIOS enabled SSC we can
15960 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
15961 	 * BIOS isn't using it, don't assume it will work even if the VBT
15962 	 * indicates as much.
15963 	 */
15964 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15965 		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15966 					    DREF_SSC1_ENABLE);
15967 
15968 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15969 			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15970 				     bios_lvds_use_ssc ? "en" : "dis",
15971 				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15972 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15973 		}
15974 	}
15975 
15976 	/*
15977 	 * Maximum framebuffer dimensions, chosen to match
15978 	 * the maximum render engine surface size on gen4+.
15979 	 */
15980 	if (INTEL_GEN(dev_priv) >= 7) {
15981 		dev->mode_config.max_width = 16384;
15982 		dev->mode_config.max_height = 16384;
15983 	} else if (INTEL_GEN(dev_priv) >= 4) {
15984 		dev->mode_config.max_width = 8192;
15985 		dev->mode_config.max_height = 8192;
15986 	} else if (IS_GEN(dev_priv, 3)) {
15987 		dev->mode_config.max_width = 4096;
15988 		dev->mode_config.max_height = 4096;
15989 	} else {
15990 		dev->mode_config.max_width = 2048;
15991 		dev->mode_config.max_height = 2048;
15992 	}
15993 
15994 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15995 		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15996 		dev->mode_config.cursor_height = 1023;
15997 	} else if (IS_GEN(dev_priv, 2)) {
15998 		dev->mode_config.cursor_width = 64;
15999 		dev->mode_config.cursor_height = 64;
16000 	} else {
16001 		dev->mode_config.cursor_width = 256;
16002 		dev->mode_config.cursor_height = 256;
16003 	}
16004 
16005 	dev->mode_config.fb_base = ggtt->gmadr.start;
16006 
16007 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
16008 		      INTEL_INFO(dev_priv)->num_pipes,
16009 		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
16010 
16011 	for_each_pipe(dev_priv, pipe) {
16012 		ret = intel_crtc_init(dev_priv, pipe);
16013 		if (ret) {
16014 			drm_mode_config_cleanup(dev);
16015 			return ret;
16016 		}
16017 	}
16018 
16019 	intel_shared_dpll_init(dev);
16020 	intel_update_fdi_pll_freq(dev_priv);
16021 
16022 	intel_update_czclk(dev_priv);
16023 	intel_modeset_init_hw(dev);
16024 
16025 	intel_hdcp_component_init(dev_priv);
16026 
16027 	if (dev_priv->max_cdclk_freq == 0)
16028 		intel_update_max_cdclk(dev_priv);
16029 
16030 	/* Just disable it once at startup */
16031 	i915_disable_vga(dev_priv);
16032 	intel_setup_outputs(dev_priv);
16033 
16034 	drm_modeset_lock_all(dev);
16035 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
16036 	drm_modeset_unlock_all(dev);
16037 
16038 	for_each_intel_crtc(dev, crtc) {
16039 		struct intel_initial_plane_config plane_config = {};
16040 
16041 		if (!crtc->active)
16042 			continue;
16043 
16044 		/*
16045 		 * Note that reserving the BIOS fb up front prevents us
16046 		 * from stuffing other stolen allocations like the ring
16047 		 * on top.  This prevents some ugliness at boot time, and
16048 		 * can even allow for smooth boot transitions if the BIOS
16049 		 * fb is large enough for the active pipe configuration.
16050 		 */
16051 		dev_priv->display.get_initial_plane_config(crtc,
16052 							   &plane_config);
16053 
16054 		/*
16055 		 * If the fb is shared between multiple heads, we'll
16056 		 * just get the first one.
16057 		 */
16058 		intel_find_initial_plane_obj(crtc, &plane_config);
16059 	}
16060 
16061 	/*
16062 	 * Make sure hardware watermarks really match the state we read out.
16063 	 * Note that we need to do this after reconstructing the BIOS fb's
16064 	 * since the watermark calculation done here will use pstate->fb.
16065 	 */
16066 	if (!HAS_GMCH(dev_priv))
16067 		sanitize_watermarks(dev);
16068 
16069 	/*
16070 	 * Force all active planes to recompute their states. So that on
16071 	 * mode_setcrtc after probe, all the intel_plane_state variables
16072 	 * are already calculated and there is no assert_plane warnings
16073 	 * during bootup.
16074 	 */
16075 	ret = intel_initial_commit(dev);
16076 	if (ret)
16077 		DRM_DEBUG_KMS("Initial commit in probe failed.\n");
16078 
16079 	return 0;
16080 }
16081 
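/*
 * Force-enable a pipe with a fixed 640x480@60 mode; used by the force
 * quirk handling on i830, which needs both pipes kept running.
 */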
16082 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16083 {
16084 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16085 	/* 640x480@60Hz, ~25175 kHz */
16086 	struct dpll clock = {
16087 		.m1 = 18,
16088 		.m2 = 7,
16089 		.p1 = 13,
16090 		.p2 = 4,
16091 		.n = 2,
16092 	};
16093 	u32 dpll, fp;
16094 	int i;
16095 
16096 	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
16097 
16098 	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
16099 		      pipe_name(pipe), clock.vco, clock.dot);
16100 
16101 	fp = i9xx_dpll_compute_fp(&clock);
16102 	dpll = DPLL_DVO_2X_MODE |
16103 		DPLL_VGA_MODE_DIS |
16104 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
16105 		PLL_P2_DIVIDE_BY_4 |
16106 		PLL_REF_INPUT_DREFCLK |
16107 		DPLL_VCO_ENABLE;
16108 
16109 	I915_WRITE(FP0(pipe), fp);
16110 	I915_WRITE(FP1(pipe), fp);
16111 
16112 	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
16113 	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
16114 	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
16115 	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
16116 	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
16117 	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
16118 	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
16119 
16120 	/*
16121 	 * Apparently we need to have VGA mode enabled prior to changing
16122 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
16123 	 * dividers, even though the register value does change.
16124 	 */
16125 	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
16126 	I915_WRITE(DPLL(pipe), dpll);
16127 
16128 	/* Wait for the clocks to stabilize. */
16129 	POSTING_READ(DPLL(pipe));
16130 	udelay(150);
16131 
	/*
	 * The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
16137 	I915_WRITE(DPLL(pipe), dpll);
16138 
16139 	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
16141 		I915_WRITE(DPLL(pipe), dpll);
16142 		POSTING_READ(DPLL(pipe));
16143 		udelay(150); /* wait for warmup */
16144 	}
16145 
16146 	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
16147 	POSTING_READ(PIPECONF(pipe));
16148 
16149 	intel_wait_for_pipe_scanline_moving(crtc);
16150 }
16151 
16152 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16153 {
16154 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16155 
16156 	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
16157 		      pipe_name(pipe));
16158 
16159 	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
16160 	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
16161 	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
16162 	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
16163 	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
16164 
16165 	I915_WRITE(PIPECONF(pipe), 0);
16166 	POSTING_READ(PIPECONF(pipe));
16167 
16168 	intel_wait_for_pipe_scanline_stopped(crtc);
16169 
16170 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
16171 	POSTING_READ(DPLL(pipe));
16172 }
16173 
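/*
 * On gen2/3 the primary planes can be assigned to either pipe, and the
 * BIOS may leave a plane attached to the "wrong" pipe; detach any such
 * plane so that our fixed plane->pipe mapping holds.
 */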
16174 static void
16175 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
16176 {
16177 	struct intel_crtc *crtc;
16178 
16179 	if (INTEL_GEN(dev_priv) >= 4)
16180 		return;
16181 
16182 	for_each_intel_crtc(&dev_priv->drm, crtc) {
16183 		struct intel_plane *plane =
16184 			to_intel_plane(crtc->base.primary);
16185 		struct intel_crtc *plane_crtc;
16186 		enum pipe pipe;
16187 
16188 		if (!plane->get_hw_state(plane, &pipe))
16189 			continue;
16190 
16191 		if (pipe == crtc->pipe)
16192 			continue;
16193 
16194 		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
16195 			      plane->base.base.id, plane->base.name);
16196 
16197 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16198 		intel_plane_disable_noatomic(plane_crtc, plane);
16199 	}
16200 }
16201 
16202 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16203 {
16204 	struct drm_device *dev = crtc->base.dev;
16205 	struct intel_encoder *encoder;
16206 
16207 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16208 		return true;
16209 
16210 	return false;
16211 }
16212 
16213 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16214 {
16215 	struct drm_device *dev = encoder->base.dev;
16216 	struct intel_connector *connector;
16217 
16218 	for_each_connector_on_encoder(dev, &encoder->base, connector)
16219 		return connector;
16220 
16221 	return NULL;
16222 }
16223 
static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum pipe pch_transcoder)
16226 {
16227 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16228 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
16229 }
16230 
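/*
 * Sanitize crtc state inherited from the BIOS: clear debug frame start
 * delays, disable everything but the primary plane, and turn off pipes
 * that have no active encoders left.
 */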
16231 static void intel_sanitize_crtc(struct intel_crtc *crtc,
16232 				struct drm_modeset_acquire_ctx *ctx)
16233 {
16234 	struct drm_device *dev = crtc->base.dev;
16235 	struct drm_i915_private *dev_priv = to_i915(dev);
16236 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
16237 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
16238 
16239 	/* Clear any frame start delays used for debugging left by the BIOS */
16240 	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
16241 		i915_reg_t reg = PIPECONF(cpu_transcoder);
16242 
16243 		I915_WRITE(reg,
16244 			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
16245 	}
16246 
16247 	if (crtc_state->base.active) {
16248 		struct intel_plane *plane;
16249 
16250 		/* Disable everything but the primary plane */
16251 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
16252 			const struct intel_plane_state *plane_state =
16253 				to_intel_plane_state(plane->base.state);
16254 
16255 			if (plane_state->base.visible &&
16256 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
16257 				intel_plane_disable_noatomic(crtc, plane);
16258 		}
16259 
16260 		/*
16261 		 * Disable any background color set by the BIOS, but enable the
16262 		 * gamma and CSC to match how we program our planes.
16263 		 */
16264 		if (INTEL_GEN(dev_priv) >= 9)
16265 			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
16266 				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
16267 				   SKL_BOTTOM_COLOR_CSC_ENABLE);
16268 	}
16269 
	/*
	 * Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders.
	 */
16272 	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
16273 		intel_crtc_disable_noatomic(&crtc->base, ctx);
16274 
16275 	if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
16276 		/*
16277 		 * We start out with underrun reporting disabled to avoid races.
16278 		 * For correct bookkeeping mark this on active crtcs.
16279 		 *
		 * Also on gmch platforms we don't have any hardware bits to
16281 		 * disable the underrun reporting. Which means we need to start
16282 		 * out with underrun reporting disabled also on inactive pipes,
16283 		 * since otherwise we'll complain about the garbage we read when
16284 		 * e.g. coming up after runtime pm.
16285 		 *
16286 		 * No protection against concurrent access is required - at
16287 		 * worst a fifo underrun happens which also sets this to false.
16288 		 */
16289 		crtc->cpu_fifo_underrun_disabled = true;
16290 		/*
		 * We track the PCH transcoder underrun reporting state
16292 		 * within the crtc. With crtc for pipe A housing the underrun
16293 		 * reporting state for PCH transcoder A, crtc for pipe B housing
16294 		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
16295 		 * and marking underrun reporting as disabled for the non-existing
16296 		 * PCH transcoders B and C would prevent enabling the south
16297 		 * error interrupt (see cpt_can_enable_serr_int()).
16298 		 */
		if (has_pch_transcoder(dev_priv, crtc->pipe))
16300 			crtc->pch_fifo_underrun_disabled = true;
16301 	}
16302 }
16303 
16304 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16305 {
16306 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16307 
16308 	/*
	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
	 * the hardware when a high res display is plugged in. The DPLL P
	 * divider is zero, and the pipe timings are bonkers. We'll
16312 	 * try to disable everything in that case.
16313 	 *
16314 	 * FIXME would be nice to be able to sanitize this state
16315 	 * without several WARNs, but for now let's take the easy
16316 	 * road.
16317 	 */
16318 	return IS_GEN(dev_priv, 6) &&
16319 		crtc_state->base.active &&
16320 		crtc_state->shared_dpll &&
16321 		crtc_state->port_clock == 0;
16322 }
16323 
16324 static void intel_sanitize_encoder(struct intel_encoder *encoder)
16325 {
16326 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
16327 	struct intel_connector *connector;
16328 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
16329 	struct intel_crtc_state *crtc_state = crtc ?
16330 		to_intel_crtc_state(crtc->base.state) : NULL;
16331 
	/*
	 * We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active.
	 */
16335 	bool has_active_crtc = crtc_state &&
16336 		crtc_state->base.active;
16337 
16338 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
16339 		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
16340 			      pipe_name(crtc->pipe));
16341 		has_active_crtc = false;
16342 	}
16343 
16344 	connector = intel_encoder_find_connector(encoder);
16345 	if (connector && !has_active_crtc) {
16346 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16347 			      encoder->base.base.id,
16348 			      encoder->base.name);
16349 
		/*
		 * Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again.
		 */
16353 		if (crtc_state) {
16354 			struct drm_encoder *best_encoder;
16355 
16356 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16357 				      encoder->base.base.id,
16358 				      encoder->base.name);
16359 
16360 			/* avoid oopsing in case the hooks consult best_encoder */
16361 			best_encoder = connector->base.state->best_encoder;
16362 			connector->base.state->best_encoder = &encoder->base;
16363 
16364 			if (encoder->disable)
16365 				encoder->disable(encoder, crtc_state,
16366 						 connector->base.state);
16367 			if (encoder->post_disable)
16368 				encoder->post_disable(encoder, crtc_state,
16369 						      connector->base.state);
16370 
16371 			connector->base.state->best_encoder = best_encoder;
16372 		}
16373 		encoder->base.crtc = NULL;
16374 
		/*
		 * Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default.
		 */
16379 
16380 		connector->base.dpms = DRM_MODE_DPMS_OFF;
16381 		connector->base.encoder = NULL;
16382 	}
16383 
16384 	/* notify opregion of the sanitized encoder state */
16385 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
16386 
16387 	if (INTEL_GEN(dev_priv) >= 11)
16388 		icl_sanitize_encoder_pll_mapping(encoder);
16389 }
16390 
16391 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16392 {
16393 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16394 
16395 	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16396 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16397 		i915_disable_vga(dev_priv);
16398 	}
16399 }
16400 
16401 void i915_redisable_vga(struct drm_i915_private *dev_priv)
16402 {
16403 	intel_wakeref_t wakeref;
16404 
16405 	/*
	 * This function can be called both from intel_modeset_setup_hw_state and
16407 	 * at a very early point in our resume sequence, where the power well
16408 	 * structures are not yet restored. Since this function is at a very
16409 	 * paranoid "someone might have enabled VGA while we were not looking"
16410 	 * level, just check if the power well is enabled instead of trying to
16411 	 * follow the "don't touch the power well if we don't need it" policy
16412 	 * the rest of the driver uses.
16413 	 */
16414 	wakeref = intel_display_power_get_if_enabled(dev_priv,
16415 						     POWER_DOMAIN_VGA);
16416 	if (!wakeref)
16417 		return;
16418 
16419 	i915_redisable_vga_power_on(dev_priv);
16420 
16421 	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
16422 }
16423 
16424 /* FIXME read out full plane state for all planes */
16425 static void readout_plane_state(struct drm_i915_private *dev_priv)
16426 {
16427 	struct intel_plane *plane;
16428 	struct intel_crtc *crtc;
16429 
16430 	for_each_intel_plane(&dev_priv->drm, plane) {
16431 		struct intel_plane_state *plane_state =
16432 			to_intel_plane_state(plane->base.state);
16433 		struct intel_crtc_state *crtc_state;
16434 		enum pipe pipe = PIPE_A;
16435 		bool visible;
16436 
16437 		visible = plane->get_hw_state(plane, &pipe);
16438 
16439 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16440 		crtc_state = to_intel_crtc_state(crtc->base.state);
16441 
16442 		intel_set_plane_visible(crtc_state, plane_state, visible);
16443 
16444 		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16445 			      plane->base.base.id, plane->base.name,
16446 			      enableddisabled(visible), pipe_name(pipe));
16447 	}
16448 
16449 	for_each_intel_crtc(&dev_priv->drm, crtc) {
16450 		struct intel_crtc_state *crtc_state =
16451 			to_intel_crtc_state(crtc->base.state);
16452 
16453 		fixup_active_planes(crtc_state);
16454 	}
16455 }
16456 
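/*
 * Read the current modeset state out of the hardware (crtcs, planes,
 * shared DPLLs, encoders, connectors) and reconstruct matching atomic
 * state objects for it.
 */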
16457 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16458 {
16459 	struct drm_i915_private *dev_priv = to_i915(dev);
16460 	enum pipe pipe;
16461 	struct intel_crtc *crtc;
16462 	struct intel_encoder *encoder;
16463 	struct intel_connector *connector;
16464 	struct drm_connector_list_iter conn_iter;
16465 	int i;
16466 
16467 	dev_priv->active_crtcs = 0;
16468 
16469 	for_each_intel_crtc(dev, crtc) {
16470 		struct intel_crtc_state *crtc_state =
16471 			to_intel_crtc_state(crtc->base.state);
16472 
16473 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16474 		memset(crtc_state, 0, sizeof(*crtc_state));
16475 		__drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
16476 
16477 		crtc_state->base.active = crtc_state->base.enable =
16478 			dev_priv->display.get_pipe_config(crtc, crtc_state);
16479 
16480 		crtc->base.enabled = crtc_state->base.enable;
16481 		crtc->active = crtc_state->base.active;
16482 
16483 		if (crtc_state->base.active)
16484 			dev_priv->active_crtcs |= 1 << crtc->pipe;
16485 
16486 		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16487 			      crtc->base.base.id, crtc->base.name,
16488 			      enableddisabled(crtc_state->base.active));
16489 	}
16490 
16491 	readout_plane_state(dev_priv);
16492 
16493 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16494 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16495 
16496 		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16497 							&pll->state.hw_state);
16498 		pll->state.crtc_mask = 0;
16499 		for_each_intel_crtc(dev, crtc) {
16500 			struct intel_crtc_state *crtc_state =
16501 				to_intel_crtc_state(crtc->base.state);
16502 
16503 			if (crtc_state->base.active &&
16504 			    crtc_state->shared_dpll == pll)
16505 				pll->state.crtc_mask |= 1 << crtc->pipe;
16506 		}
16507 		pll->active_mask = pll->state.crtc_mask;
16508 
16509 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16510 			      pll->info->name, pll->state.crtc_mask, pll->on);
16511 	}
16512 
16513 	for_each_intel_encoder(dev, encoder) {
16514 		pipe = 0;
16515 
16516 		if (encoder->get_hw_state(encoder, &pipe)) {
16517 			struct intel_crtc_state *crtc_state;
16518 
16519 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16520 			crtc_state = to_intel_crtc_state(crtc->base.state);
16521 
16522 			encoder->base.crtc = &crtc->base;
16523 			encoder->get_config(encoder, crtc_state);
16524 		} else {
16525 			encoder->base.crtc = NULL;
16526 		}
16527 
16528 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16529 			      encoder->base.base.id, encoder->base.name,
16530 			      enableddisabled(encoder->base.crtc),
16531 			      pipe_name(pipe));
16532 	}
16533 
16534 	drm_connector_list_iter_begin(dev, &conn_iter);
16535 	for_each_intel_connector_iter(connector, &conn_iter) {
16536 		if (connector->get_hw_state(connector)) {
16537 			connector->base.dpms = DRM_MODE_DPMS_ON;
16538 
16539 			encoder = connector->encoder;
16540 			connector->base.encoder = &encoder->base;
16541 
16542 			if (encoder->base.crtc &&
16543 			    encoder->base.crtc->state->active) {
16544 				/*
16545 				 * This has to be done during hardware readout
16546 				 * because anything calling .crtc_disable may
16547 				 * rely on the connector_mask being accurate.
16548 				 */
16549 				encoder->base.crtc->state->connector_mask |=
16550 					drm_connector_mask(&connector->base);
16551 				encoder->base.crtc->state->encoder_mask |=
16552 					drm_encoder_mask(&encoder->base);
16553 			}
16554 
16555 		} else {
16556 			connector->base.dpms = DRM_MODE_DPMS_OFF;
16557 			connector->base.encoder = NULL;
16558 		}
16559 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16560 			      connector->base.base.id, connector->base.name,
16561 			      enableddisabled(connector->base.encoder));
16562 	}
16563 	drm_connector_list_iter_end(&conn_iter);
16564 
16565 	for_each_intel_crtc(dev, crtc) {
16566 		struct intel_bw_state *bw_state =
16567 			to_intel_bw_state(dev_priv->bw_obj.state);
16568 		struct intel_crtc_state *crtc_state =
16569 			to_intel_crtc_state(crtc->base.state);
16570 		struct intel_plane *plane;
16571 		int min_cdclk = 0;
16572 
16573 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16574 		if (crtc_state->base.active) {
16575 			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
16576 			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16577 			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
16578 			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
16579 			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16580 
16581 			/*
16582 			 * The initial mode needs to be set in order to keep
16583 			 * the atomic core happy. It wants a valid mode if the
16584 			 * crtc's enabled, so we do the above call.
16585 			 *
16586 			 * But we don't set all the derived state fully, hence
16587 			 * set a flag to indicate that a full recalculation is
16588 			 * needed on the next commit.
16589 			 */
16590 			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16591 
16592 			intel_crtc_compute_pixel_rate(crtc_state);
16593 
16594 			if (dev_priv->display.modeset_calc_cdclk) {
16595 				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16596 				if (WARN_ON(min_cdclk < 0))
16597 					min_cdclk = 0;
16598 			}
16599 
16600 			drm_calc_timestamping_constants(&crtc->base,
16601 							&crtc_state->base.adjusted_mode);
16602 			update_scanline_offset(crtc_state);
16603 		}
16604 
16605 		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16606 		dev_priv->min_voltage_level[crtc->pipe] =
16607 			crtc_state->min_voltage_level;
16608 
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->base.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
		}

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}

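/*
 * Let each active encoder grab the power domain references it needs for
 * the state it was read out in.
 */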
static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state,
		 * and neither of these requires any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}

static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = I915_READ(hdmi_reg);

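	/*
	 * Leave the port alone if the BIOS actually enabled it, or if it
	 * already selects transcoder A.
	 */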
	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
		      port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	I915_WRITE(hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = I915_READ(dp_reg);

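	/* Same rule as for HDMI: enabled or already on transcoder A, leave it. */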
	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
		      port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	I915_WRITE(dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplexes with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state, and sanitize it so that the sw
 * state agrees with what the hardware is actually doing.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->base.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

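	/*
	 * Turn off any shared DPLLs the hardware has left enabled even
	 * though no crtc is using them.
	 */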
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

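	/* Read out (and where supported, sanitize) the watermark state. */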
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

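	/*
	 * Acquire the power domains each crtc needs for its current state.
	 * Nothing should need to be released at this point, so warn about
	 * (and drop) any domains reported back.
	 */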
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}

void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

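	/* Consume any atomic state saved at suspend time. */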
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

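	/* Take all modeset locks, backing off and retrying on deadlock. */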
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Shut down interrupts and polling first to avoid creating havoc.
	 * Too much of what follows (turning off connectors, ...) would
	 * otherwise run into fancy races.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling, the hotplug work can re-arm
	 * the poll handlers. Hence disable polling only after hpd handling
	 * has been shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(dev_priv);

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_gmbus_teardown(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}

/*
 * Set the VGA decode state: state == true enables VGA decode.
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned int reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

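	/* Nothing to do if the decode state already matches the request. */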
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

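	/* Error capture may run in atomic context, hence GFP_ATOMIC. */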
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

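	/*
	 * Capture transcoder state only for transcoders that exist on this
	 * platform and whose power domain is currently enabled.
	 */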
	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif