1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46 
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_fb.h"
58 #include "display/intel_gmbus.h"
59 #include "display/intel_hdmi.h"
60 #include "display/intel_lvds.h"
61 #include "display/intel_sdvo.h"
62 #include "display/intel_tv.h"
63 #include "display/intel_vdsc.h"
64 #include "display/intel_vrr.h"
65 
66 #include "gem/i915_gem_object.h"
67 
68 #include "gt/intel_rps.h"
69 
70 #include "g4x_dp.h"
71 #include "g4x_hdmi.h"
72 #include "i915_drv.h"
73 #include "intel_acpi.h"
74 #include "intel_atomic.h"
75 #include "intel_atomic_plane.h"
76 #include "intel_bw.h"
77 #include "intel_cdclk.h"
78 #include "intel_color.h"
79 #include "intel_crtc.h"
80 #include "intel_csr.h"
81 #include "intel_display_types.h"
82 #include "intel_dp_link_training.h"
83 #include "intel_fbc.h"
84 #include "intel_fdi.h"
85 #include "intel_fbdev.h"
86 #include "intel_fifo_underrun.h"
87 #include "intel_frontbuffer.h"
88 #include "intel_hdcp.h"
89 #include "intel_hotplug.h"
90 #include "intel_overlay.h"
91 #include "intel_pipe_crc.h"
92 #include "intel_pm.h"
93 #include "intel_pps.h"
94 #include "intel_psr.h"
95 #include "intel_quirks.h"
96 #include "intel_sideband.h"
97 #include "intel_sprite.h"
98 #include "intel_tc.h"
99 #include "intel_vga.h"
100 #include "i9xx_plane.h"
101 #include "skl_scaler.h"
102 #include "skl_universal_plane.h"
103 
104 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
105 				struct intel_crtc_state *pipe_config);
106 static void ilk_pch_clock_get(struct intel_crtc *crtc,
107 			      struct intel_crtc_state *pipe_config);
108 
109 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
110 				  struct drm_i915_gem_object *obj,
111 				  struct drm_mode_fb_cmd2 *mode_cmd);
112 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
113 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
114 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
115 					 const struct intel_link_m_n *m_n,
116 					 const struct intel_link_m_n *m2_n2);
117 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
118 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
119 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
120 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
121 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
122 static void intel_modeset_setup_hw_state(struct drm_device *dev,
123 					 struct drm_modeset_acquire_ctx *ctx);
124 
125 /* returns HPLL frequency in kHz */
126 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
127 {
128 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
129 
130 	/* Obtain SKU information */
131 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
132 		CCK_FUSE_HPLL_FREQ_MASK;
133 
134 	return vco_freq[hpll_freq] * 1000;
135 }
136 
137 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
138 		      const char *name, u32 reg, int ref_freq)
139 {
140 	u32 val;
141 	int divider;
142 
143 	val = vlv_cck_read(dev_priv, reg);
144 	divider = val & CCK_FREQUENCY_VALUES;
145 
146 	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
147 		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
148 		 "%s change in progress\n", name);
149 
150 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
151 }
152 
153 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
154 			   const char *name, u32 reg)
155 {
156 	int hpll;
157 
158 	vlv_cck_get(dev_priv);
159 
160 	if (dev_priv->hpll_freq == 0)
161 		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
162 
163 	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
164 
165 	vlv_cck_put(dev_priv);
166 
167 	return hpll;
168 }
169 
170 static void intel_update_czclk(struct drm_i915_private *dev_priv)
171 {
172 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
173 		return;
174 
175 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
176 						      CCK_CZ_CLOCK_CONTROL);
177 
178 	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
179 		dev_priv->czclk_freq);
180 }
181 
182 /* WA Display #0827: Gen9:all */
183 static void
184 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
185 {
186 	if (enable)
187 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
188 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
189 	else
190 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
191 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
192 }
193 
194 /* Wa_2006604312:icl,ehl */
195 static void
196 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
197 		       bool enable)
198 {
199 	if (enable)
200 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
201 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
202 	else
203 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
204 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
205 }
206 
/* A port sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
212 
/* A port sync master has at least one slave transcoder in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
218 
/*
 * Whether this crtc participates in transcoder port sync at all,
 * either as the master or as a slave.
 */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
225 
226 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
227 				    enum pipe pipe)
228 {
229 	i915_reg_t reg = PIPEDSL(pipe);
230 	u32 line1, line2;
231 	u32 line_mask;
232 
233 	if (IS_DISPLAY_VER(dev_priv, 2))
234 		line_mask = DSL_LINEMASK_GEN2;
235 	else
236 		line_mask = DSL_LINEMASK_GEN3;
237 
238 	line1 = intel_de_read(dev_priv, reg) & line_mask;
239 	msleep(5);
240 	line2 = intel_de_read(dev_priv, reg) & line_mask;
241 
242 	return line1 != line2;
243 }
244 
245 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
246 {
247 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
248 	enum pipe pipe = crtc->pipe;
249 
250 	/* Wait for the display line to settle/start moving */
251 	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
252 		drm_err(&dev_priv->drm,
253 			"pipe %c scanline %s wait timed out\n",
254 			pipe_name(pipe), onoff(state));
255 }
256 
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
261 
/* Wait until the pipe's scanline counter has started advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
266 
267 static void
268 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
269 {
270 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
271 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
272 
273 	if (DISPLAY_VER(dev_priv) >= 4) {
274 		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
275 		i915_reg_t reg = PIPECONF(cpu_transcoder);
276 
277 		/* Wait for the Pipe State to go off */
278 		if (intel_de_wait_for_clear(dev_priv, reg,
279 					    I965_PIPECONF_ACTIVE, 100))
280 			drm_WARN(&dev_priv->drm, 1,
281 				 "pipe_off wait timed out\n");
282 	} else {
283 		intel_wait_for_pipe_scanline_stopped(crtc);
284 	}
285 }
286 
287 /* Only for pre-ILK configs */
288 void assert_pll(struct drm_i915_private *dev_priv,
289 		enum pipe pipe, bool state)
290 {
291 	u32 val;
292 	bool cur_state;
293 
294 	val = intel_de_read(dev_priv, DPLL(pipe));
295 	cur_state = !!(val & DPLL_VCO_ENABLE);
296 	I915_STATE_WARN(cur_state != state,
297 	     "PLL state assertion failure (expected %s, current %s)\n",
298 			onoff(state), onoff(cur_state));
299 }
300 
301 /* XXX: the dsi pll is shared between MIPI DSI ports */
302 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
303 {
304 	u32 val;
305 	bool cur_state;
306 
307 	vlv_cck_get(dev_priv);
308 	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
309 	vlv_cck_put(dev_priv);
310 
311 	cur_state = val & DSI_PLL_VCO_EN;
312 	I915_STATE_WARN(cur_state != state,
313 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
314 			onoff(state), onoff(cur_state));
315 }
316 
317 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
318 			  enum pipe pipe, bool state)
319 {
320 	bool cur_state;
321 
322 	if (HAS_DDI(dev_priv)) {
323 		/*
324 		 * DDI does not have a specific FDI_TX register.
325 		 *
326 		 * FDI is never fed from EDP transcoder
327 		 * so pipe->transcoder cast is fine here.
328 		 */
329 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
330 		u32 val = intel_de_read(dev_priv,
331 					TRANS_DDI_FUNC_CTL(cpu_transcoder));
332 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
333 	} else {
334 		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
335 		cur_state = !!(val & FDI_TX_ENABLE);
336 	}
337 	I915_STATE_WARN(cur_state != state,
338 	     "FDI TX state assertion failure (expected %s, current %s)\n",
339 			onoff(state), onoff(cur_state));
340 }
341 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
342 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
343 
344 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
345 			  enum pipe pipe, bool state)
346 {
347 	u32 val;
348 	bool cur_state;
349 
350 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
351 	cur_state = !!(val & FDI_RX_ENABLE);
352 	I915_STATE_WARN(cur_state != state,
353 	     "FDI RX state assertion failure (expected %s, current %s)\n",
354 			onoff(state), onoff(cur_state));
355 }
356 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
357 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
358 
359 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
360 				      enum pipe pipe)
361 {
362 	u32 val;
363 
364 	/* ILK FDI PLL is always enabled */
365 	if (IS_IRONLAKE(dev_priv))
366 		return;
367 
368 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
369 	if (HAS_DDI(dev_priv))
370 		return;
371 
372 	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
373 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
374 }
375 
376 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
377 		       enum pipe pipe, bool state)
378 {
379 	u32 val;
380 	bool cur_state;
381 
382 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
383 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
384 	I915_STATE_WARN(cur_state != state,
385 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
386 			onoff(state), onoff(cur_state));
387 }
388 
/*
 * Assert that the panel power sequencer registers driving @pipe are not
 * write-locked while the panel is powered on (pre-DDI platforms only).
 * Determines which PP control instance applies and which pipe the panel
 * actually hangs off, per platform.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are not handled here */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/*
		 * PCH platforms: single PP instance; the port select
		 * field tells us which output feeds the panel, and the
		 * port-enabled helpers report that output's pipe.
		 */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH: only an LVDS panel is expected here */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Unlocked if the panel is off, or the unlock key is programmed */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
446 
/*
 * Assert that @cpu_transcoder is in the expected enabled/disabled
 * @state. PIPECONF is only read if the transcoder's power domain is
 * currently powered; an unpowered domain implies the transcoder is off.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power domain off: the transcoder cannot be enabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
474 
475 static void assert_plane(struct intel_plane *plane, bool state)
476 {
477 	enum pipe pipe;
478 	bool cur_state;
479 
480 	cur_state = plane->get_hw_state(plane, &pipe);
481 
482 	I915_STATE_WARN(cur_state != state,
483 			"%s assertion failure (expected %s, current %s)\n",
484 			plane->base.name, onoff(state), onoff(cur_state));
485 }
486 
487 #define assert_plane_enabled(p) assert_plane(p, true)
488 #define assert_plane_disabled(p) assert_plane(p, false)
489 
490 static void assert_planes_disabled(struct intel_crtc *crtc)
491 {
492 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
493 	struct intel_plane *plane;
494 
495 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
496 		assert_plane_disabled(plane);
497 }
498 
499 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
500 				    enum pipe pipe)
501 {
502 	u32 val;
503 	bool enabled;
504 
505 	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
506 	enabled = !!(val & TRANS_ENABLE);
507 	I915_STATE_WARN(enabled,
508 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
509 	     pipe_name(pipe));
510 }
511 
512 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
513 				   enum pipe pipe, enum port port,
514 				   i915_reg_t dp_reg)
515 {
516 	enum pipe port_pipe;
517 	bool state;
518 
519 	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
520 
521 	I915_STATE_WARN(state && port_pipe == pipe,
522 			"PCH DP %c enabled on transcoder %c, should be disabled\n",
523 			port_name(port), pipe_name(pipe));
524 
525 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
526 			"IBX PCH DP %c still using transcoder B\n",
527 			port_name(port));
528 }
529 
530 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
531 				     enum pipe pipe, enum port port,
532 				     i915_reg_t hdmi_reg)
533 {
534 	enum pipe port_pipe;
535 	bool state;
536 
537 	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
538 
539 	I915_STATE_WARN(state && port_pipe == pipe,
540 			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
541 			port_name(port), pipe_name(pipe));
542 
543 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
544 			"IBX PCH HDMI %c still using transcoder B\n",
545 			port_name(port));
546 }
547 
/*
 * Assert that no PCH port (DP, VGA, LVDS, HDMI/SDVO) is still routing
 * from transcoder @pipe; used before disabling the PCH transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
572 
/*
 * Wait (up to 1s) for a VLV/CHV digital port's PHY ready bits to reach
 * @expected_mask, warning with the observed value on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Pick the register and bitfield holding this port's ready bits */
	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready bits sit 4 bits above port B's in DPLL(0) */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
606 
/*
 * Enable the PCH transcoder for @crtc_state's pipe on ILK-style
 * (IBX/CPT) PCH platforms. The PCH DPLL and both FDI TX/RX must
 * already be enabled; BPC and interlace settings are copied/derived
 * from the CPU pipe's PIPECONF before enabling.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the CPU pipe's interlace mode on the PCH transcoder */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
673 
/*
 * Enable the single LPT PCH transcoder (always fed via FDI on pipe A),
 * copying the interlace mode from @cpu_transcoder's PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	/* Mirror the CPU pipe's interlace mode */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
705 
/*
 * Disable the PCH transcoder for @pipe on ILK-style PCH platforms.
 * FDI and all PCH ports must already be off; waits for the transcoder
 * state bit to clear, then (on CPT) clears the timing override
 * workaround bit again.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
736 
/*
 * Disable the single LPT PCH transcoder, waiting for its state bit to
 * clear before undoing the timing override workaround.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
754 
755 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
756 {
757 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
758 
759 	if (HAS_PCH_LPT(dev_priv))
760 		return PIPE_A;
761 	else
762 		return crtc->pipe;
763 }
764 
/*
 * Enable the CPU pipe for @new_crtc_state. Planes must still be
 * disabled, and the relevant PLL (pipe PLL / DSI PLL / FDI PLLs for
 * PCH encoders) must be running before the pipe is turned on.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
820 
/*
 * Disable the CPU pipe for @old_crtc_state and wait until it has
 * actually stopped. Planes must already be off. On i830 the pipe is
 * deliberately left enabled; only double-wide mode is cleared there.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
858 
859 bool
860 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
861 				    u64 modifier)
862 {
863 	return info->is_yuv &&
864 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
865 }
866 
/*
 * Return the tile width in bytes for @fb's @color_plane, based on the
 * fb modifier, display version and bytes-per-pixel. Unknown modifiers
 * or cpp values warn via MISSING_CASE and fall back to cpp.
 */
unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		/* linear is treated as one tile-sized row */
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_DISPLAY_VER(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* gen12 CCS planes use a narrower tile than gen9/10 */
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_DISPLAY_VER(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on bytes per pixel */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
920 
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
929 
930 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
931 {
932 	unsigned int size = 0;
933 	int i;
934 
935 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
936 		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
937 
938 	return size;
939 }
940 
941 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
942 {
943 	unsigned int size = 0;
944 	int i;
945 
946 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
947 		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
948 
949 	return size;
950 }
951 
/* Minimum GGTT alignment for linear framebuffers, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (DISPLAY_VER(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
964 
/* Async flips are available on display version 5 and newer. */
static bool has_async_flips(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 5;
}
969 
/*
 * Return the GGTT alignment (in bytes) required to scan out @color_plane
 * of @fb, based on the fb modifier and display version. A return value of
 * 0 means no special alignment requirement.
 */
unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((DISPLAY_VER(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		/* 256K when async flips are possible (see has_async_flips()) */
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* Semiplanar UV planes must start on a tile row boundary */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		/* gen12+ semiplanar UV planes also need tile row alignment */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
1008 
1009 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1010 {
1011 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1012 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1013 
1014 	return DISPLAY_VER(dev_priv) < 4 ||
1015 		(plane->has_fbc &&
1016 		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
1017 }
1018 
1019 struct i915_vma *
1020 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1021 			   bool phys_cursor,
1022 			   const struct i915_ggtt_view *view,
1023 			   bool uses_fence,
1024 			   unsigned long *out_flags)
1025 {
1026 	struct drm_device *dev = fb->dev;
1027 	struct drm_i915_private *dev_priv = to_i915(dev);
1028 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1029 	intel_wakeref_t wakeref;
1030 	struct i915_gem_ww_ctx ww;
1031 	struct i915_vma *vma;
1032 	unsigned int pinctl;
1033 	u32 alignment;
1034 	int ret;
1035 
1036 	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1037 		return ERR_PTR(-EINVAL);
1038 
1039 	if (phys_cursor)
1040 		alignment = intel_cursor_alignment(dev_priv);
1041 	else
1042 		alignment = intel_surf_alignment(fb, 0);
1043 	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1044 		return ERR_PTR(-EINVAL);
1045 
1046 	/* Note that the w/a also requires 64 PTE of padding following the
1047 	 * bo. We currently fill all unused PTE with the shadow page and so
1048 	 * we should always have valid PTE following the scanout preventing
1049 	 * the VT-d warning.
1050 	 */
1051 	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1052 		alignment = 256 * 1024;
1053 
1054 	/*
1055 	 * Global gtt pte registers are special registers which actually forward
1056 	 * writes to a chunk of system memory. Which means that there is no risk
1057 	 * that the register values disappear as soon as we call
1058 	 * intel_runtime_pm_put(), so it is correct to wrap only the
1059 	 * pin/unpin/fence and not more.
1060 	 */
1061 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1062 
1063 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1064 
1065 	/*
1066 	 * Valleyview is definitely limited to scanning out the first
1067 	 * 512MiB. Lets presume this behaviour was inherited from the
1068 	 * g4x display engine and that all earlier gen are similarly
1069 	 * limited. Testing suggests that it is a little more
1070 	 * complicated than this. For example, Cherryview appears quite
1071 	 * happy to scanout from anywhere within its global aperture.
1072 	 */
1073 	pinctl = 0;
1074 	if (HAS_GMCH(dev_priv))
1075 		pinctl |= PIN_MAPPABLE;
1076 
1077 	i915_gem_ww_ctx_init(&ww, true);
1078 retry:
1079 	ret = i915_gem_object_lock(obj, &ww);
1080 	if (!ret && phys_cursor)
1081 		ret = i915_gem_object_attach_phys(obj, alignment);
1082 	if (!ret)
1083 		ret = i915_gem_object_pin_pages(obj);
1084 	if (ret)
1085 		goto err;
1086 
1087 	if (!ret) {
1088 		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
1089 							   view, pinctl);
1090 		if (IS_ERR(vma)) {
1091 			ret = PTR_ERR(vma);
1092 			goto err_unpin;
1093 		}
1094 	}
1095 
1096 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1097 		/*
1098 		 * Install a fence for tiled scan-out. Pre-i965 always needs a
1099 		 * fence, whereas 965+ only requires a fence if using
1100 		 * framebuffer compression.  For simplicity, we always, when
1101 		 * possible, install a fence as the cost is not that onerous.
1102 		 *
1103 		 * If we fail to fence the tiled scanout, then either the
1104 		 * modeset will reject the change (which is highly unlikely as
1105 		 * the affected systems, all but one, do not have unmappable
1106 		 * space) or we will not be able to enable full powersaving
1107 		 * techniques (also likely not to apply due to various limits
1108 		 * FBC and the like impose on the size of the buffer, which
1109 		 * presumably we violated anyway with this unmappable buffer).
1110 		 * Anyway, it is presumably better to stumble onwards with
1111 		 * something and try to run the system in a "less than optimal"
1112 		 * mode that matches the user configuration.
1113 		 */
1114 		ret = i915_vma_pin_fence(vma);
1115 		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
1116 			i915_vma_unpin(vma);
1117 			goto err_unpin;
1118 		}
1119 		ret = 0;
1120 
1121 		if (vma->fence)
1122 			*out_flags |= PLANE_HAS_FENCE;
1123 	}
1124 
1125 	i915_vma_get(vma);
1126 
1127 err_unpin:
1128 	i915_gem_object_unpin_pages(obj);
1129 err:
1130 	if (ret == -EDEADLK) {
1131 		ret = i915_gem_ww_ctx_backoff(&ww);
1132 		if (!ret)
1133 			goto retry;
1134 	}
1135 	i915_gem_ww_ctx_fini(&ww);
1136 	if (ret)
1137 		vma = ERR_PTR(ret);
1138 
1139 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1140 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1141 	return vma;
1142 }
1143 
/*
 * Release a vma pinned by intel_pin_and_fence_fb_obj(): drop the fence
 * (if PLANE_HAS_FENCE was set), the pin, and the vma reference, in that
 * order.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}
1151 
1152 /*
1153  * Convert the x/y offsets into a linear offset.
1154  * Only valid with 0/180 degree rotation, which is fine since linear
1155  * offset is only used with linear buffers on pre-hsw and tiled buffers
1156  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
1157  */
1158 u32 intel_fb_xy_to_linear(int x, int y,
1159 			  const struct intel_plane_state *state,
1160 			  int color_plane)
1161 {
1162 	const struct drm_framebuffer *fb = state->hw.fb;
1163 	unsigned int cpp = fb->format->cpp[color_plane];
1164 	unsigned int pitch = state->view.color_plane[color_plane].stride;
1165 
1166 	return y * pitch + x * cpp;
1167 }
1168 
1169 /*
1170  * Add the x/y offsets derived from fb->offsets[] to the user
1171  * specified plane src x/y offsets. The resulting x/y offsets
1172  * specify the start of scanout from the beginning of the gtt mapping.
1173  */
1174 void intel_add_fb_offsets(int *x, int *y,
1175 			  const struct intel_plane_state *state,
1176 			  int color_plane)
1177 
1178 {
1179 	*x += state->view.color_plane[color_plane].x;
1180 	*y += state->view.color_plane[color_plane].y;
1181 }
1182 
1183 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1184 {
1185 	switch (fb_modifier) {
1186 	case I915_FORMAT_MOD_X_TILED:
1187 		return I915_TILING_X;
1188 	case I915_FORMAT_MOD_Y_TILED:
1189 	case I915_FORMAT_MOD_Y_TILED_CCS:
1190 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1191 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1192 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1193 		return I915_TILING_Y;
1194 	default:
1195 		return I915_TILING_NONE;
1196 	}
1197 }
1198 
1199 /*
1200  * From the Sky Lake PRM:
1201  * "The Color Control Surface (CCS) contains the compression status of
1202  *  the cache-line pairs. The compression state of the cache-line pair
1203  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1204  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1205  *  cache-line-pairs. CCS is always Y tiled."
1206  *
1207  * Since cache line pairs refers to horizontally adjacent cache lines,
1208  * each cache line in the CCS corresponds to an area of 32x16 cache
1209  * lines on the main surface. Since each pixel is 4 bytes, this gives
1210  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1211  * main surface.
1212  */
static const struct drm_format_info skl_ccs_formats[] = {
	/* Plane 1 is the CCS plane: 1 byte per 8x16 main surface pixels. */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
1223 
1224 /*
1225  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1226  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1227  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1228  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
1229  * the main surface.
1230  */
static const struct drm_format_info gen12_ccs_formats[] = {
	/* RGB formats: plane 1 is the CCS plane (1 byte per 2x1 block). */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	/* Packed YUV: same 2-plane layout as RGB, 2 bytes per main pixel. */
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	/* Semi-planar YUV: planes 2 and 3 are the CCS planes for Y and UV. */
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
1269 
1270 /*
1271  * Same as gen12_ccs_formats[] above, but with additional surface used
1272  * to pass Clear Color information in plane 2 with 64 bits of data.
1273  */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	/* Plane 2 (char_per_block 0) carries the 64-bit clear color. */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};
1288 
1289 static const struct drm_format_info *
1290 lookup_format_info(const struct drm_format_info formats[],
1291 		   int num_formats, u32 format)
1292 {
1293 	int i;
1294 
1295 	for (i = 0; i < num_formats; i++) {
1296 		if (formats[i].format == format)
1297 			return &formats[i];
1298 	}
1299 
1300 	return NULL;
1301 }
1302 
1303 static const struct drm_format_info *
1304 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1305 {
1306 	switch (cmd->modifier[0]) {
1307 	case I915_FORMAT_MOD_Y_TILED_CCS:
1308 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1309 		return lookup_format_info(skl_ccs_formats,
1310 					  ARRAY_SIZE(skl_ccs_formats),
1311 					  cmd->pixel_format);
1312 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1313 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1314 		return lookup_format_info(gen12_ccs_formats,
1315 					  ARRAY_SIZE(gen12_ccs_formats),
1316 					  cmd->pixel_format);
1317 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1318 		return lookup_format_info(gen12_ccs_cc_formats,
1319 					  ARRAY_SIZE(gen12_ccs_cc_formats),
1320 					  cmd->pixel_format);
1321 	default:
1322 		return NULL;
1323 	}
1324 }
1325 
1326 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1327 {
1328 	return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1329 			    512) * 64;
1330 }
1331 
1332 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1333 			      u32 pixel_format, u64 modifier)
1334 {
1335 	struct intel_crtc *crtc;
1336 	struct intel_plane *plane;
1337 
1338 	/*
1339 	 * We assume the primary plane for pipe A has
1340 	 * the highest stride limits of them all,
1341 	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
1342 	 */
1343 	crtc = intel_get_first_crtc(dev_priv);
1344 	if (!crtc)
1345 		return 0;
1346 
1347 	plane = to_intel_plane(crtc->base.primary);
1348 
1349 	return plane->max_stride(plane, pixel_format, modifier,
1350 				 DRM_MODE_ROTATE_0);
1351 }
1352 
1353 static
1354 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1355 			u32 pixel_format, u64 modifier)
1356 {
1357 	/*
1358 	 * Arbitrary limit for gen4+ chosen to match the
1359 	 * render engine max stride.
1360 	 *
1361 	 * The new CCS hash mode makes remapping impossible
1362 	 */
1363 	if (!is_ccs_modifier(modifier)) {
1364 		if (DISPLAY_VER(dev_priv) >= 7)
1365 			return 256*1024;
1366 		else if (DISPLAY_VER(dev_priv) >= 4)
1367 			return 128*1024;
1368 	}
1369 
1370 	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1371 }
1372 
/*
 * Return the stride alignment (in bytes) a framebuffer's @color_plane
 * must satisfy for this modifier/platform combination.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_DISPLAY_VER(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (DISPLAY_VER(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
1417 
/*
 * Wrap the firmware-configured scanout buffer (which lives in stolen
 * memory) in a GEM object and pin it into the GGTT at its current
 * address, so the initial plane can keep scanning out of it.
 * Returns NULL on any failure; the caller handles the fallback.
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand base/size to the GTT minimum alignment granularity. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	/*
	 * Mark it WT ahead of time to avoid changing the
	 * cache_level during fbdev initialization. The
	 * unbind there would get stuck waiting for rcu.
	 */
	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
					    I915_CACHE_WT : I915_CACHE_NONE);

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		/* Carry the firmware-programmed stride/tiling onto the object. */
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	/* Pin at the exact GGTT offset the firmware programmed. */
	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Tiled scanout requires a mappable, fenceable vma. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
1486 
/*
 * Build an intel framebuffer around the plane configuration taken over
 * from the firmware. On success stores the pinned vma in
 * plane_config->vma and returns true.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct i915_vma *vma;

	/* Only these modifiers can be inherited from the firmware fb. */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	vma = initial_plane_vma(dev_priv, plane_config);
	if (!vma)
		return false;

	/* Recreate the fb creation parameters from the inherited fb. */
	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   vma->obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_vma;
	}

	plane_config->vma = vma;
	return true;

err_vma:
	i915_vma_put(vma);
	return false;
}
1533 
1534 static void
1535 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1536 			struct intel_plane_state *plane_state,
1537 			bool visible)
1538 {
1539 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1540 
1541 	plane_state->uapi.visible = visible;
1542 
1543 	if (visible)
1544 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1545 	else
1546 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1547 }
1548 
1549 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1550 {
1551 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1552 	struct drm_plane *plane;
1553 
1554 	/*
1555 	 * Active_planes aliases if multiple "primary" or cursor planes
1556 	 * have been used on the same (or wrong) pipe. plane_mask uses
1557 	 * unique ids, hence we can use that to reconstruct active_planes.
1558 	 */
1559 	crtc_state->enabled_planes = 0;
1560 	crtc_state->active_planes = 0;
1561 
1562 	drm_for_each_plane_mask(plane, &dev_priv->drm,
1563 				crtc_state->uapi.plane_mask) {
1564 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1565 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1566 	}
1567 }
1568 
/*
 * Disable @plane on @crtc outside of a regular atomic commit, and fix up
 * the software state (visibility, plane bitmasks, data rate, min cdclk)
 * to match. Used when sanitizing state inherited from the firmware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS depends on the primary plane; drop it along with the plane. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_DISPLAY_VER(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
1614 
/*
 * Take over the scanout buffer the firmware left enabled on this crtc's
 * primary plane: either wrap it in our own fb, share an fb another crtc
 * already reconstructed at the same GGTT address, or failing both,
 * disable the plane entirely.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(intel_crtc->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	/*
	 * TODO:
	 *   Disable planes if get_initial_plane_config() failed.
	 *   Make sure things work if the surface base is not page aligned.
	 */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc_state(c->state)->uapi.active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT offset means the same firmware buffer. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);
	if (crtc_state->bigjoiner) {
		/* The bigjoiner slave's plane must be disabled along with ours. */
		struct intel_crtc *slave =
			crtc_state->bigjoiner_linked_crtc;
		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
	}

	return;

valid_fb:
	plane_state->rotation = plane_config->rotation;
	intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
			   &intel_state->view);

	/* vma is already pinned; take an extra pin + ref for this plane. */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-fb src/crtc rectangles; src coordinates are 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
					  intel_crtc);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
1722 
/*
 * Return the y coordinate obtained by converting color plane 0's byte
 * offset back into x/y; only y is of interest here, x is discarded.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
1733 
/*
 * Re-program the display hardware state and, if @state is non-NULL,
 * recommit the atomic state that was duplicated before the reset.
 * Returns 0 on success or a negative error code; -EDEADLK is not
 * expected here and triggers a WARN.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
1772 
1773 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1774 {
1775 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1776 		intel_has_gpu_reset(&dev_priv->gt));
1777 }
1778 
/*
 * Called before a GPU reset. When the reset clobbers the display, grab
 * all modeset locks, duplicate the current atomic state for later
 * restore, and disable all crtcs. The mode_config mutex and the acquire
 * ctx stay held until intel_display_finish_reset().
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	/* A pinned fb pin may be blocked on the GPU: wedge to unblock it. */
	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		/* Locks stay held; intel_display_finish_reset() drops them. */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_display_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
1841 
/*
 * Counterpart of intel_display_prepare_reset(): restore (or fully
 * reinitialize) the display state after a GPU reset and drop the
 * modeset locks taken in prepare_reset.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
1893 
1894 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
1895 {
1896 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1897 	enum pipe pipe = crtc->pipe;
1898 	u32 tmp;
1899 
1900 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
1901 
1902 	/*
1903 	 * Display WA #1153: icl
1904 	 * enable hardware to bypass the alpha math
1905 	 * and rounding for per-pixel values 00 and 0xff
1906 	 */
1907 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
1908 	/*
1909 	 * Display WA # 1605353570: icl
1910 	 * Set the pixel rounding bit to 1 for allowing
1911 	 * passthrough of Frame buffer pixels unmodified
1912 	 * across pipe
1913 	 */
1914 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
1915 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1916 }
1917 
1918 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1919 {
1920 	struct drm_crtc *crtc;
1921 	bool cleanup_done;
1922 
1923 	drm_for_each_crtc(crtc, &dev_priv->drm) {
1924 		struct drm_crtc_commit *commit;
1925 		spin_lock(&crtc->commit_lock);
1926 		commit = list_first_entry_or_null(&crtc->commit_list,
1927 						  struct drm_crtc_commit, commit_entry);
1928 		cleanup_done = commit ?
1929 			try_wait_for_completion(&commit->cleanup_done) : true;
1930 		spin_unlock(&crtc->commit_lock);
1931 
1932 		if (cleanup_done)
1933 			continue;
1934 
1935 		drm_crtc_wait_one_vblank(crtc);
1936 
1937 		return true;
1938 	}
1939 
1940 	return false;
1941 }
1942 
1943 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
1944 {
1945 	u32 temp;
1946 
1947 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
1948 
1949 	mutex_lock(&dev_priv->sb_lock);
1950 
1951 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
1952 	temp |= SBI_SSCCTL_DISABLE;
1953 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
1954 
1955 	mutex_unlock(&dev_priv->sb_lock);
1956 }
1957 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Gate the clock and disable the modulator before reprogramming. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	/* Try auxdiv 0 first, falling back to 1 when the integer divisor
	 * would not fit in the 7-bit DIVSEL field. */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Ungate the pixel clock only after the modulator is running. */
	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
2033 
/*
 * Read back the currently programmed iCLKIP frequency in KHz.
 *
 * Returns 0 when the iCLKIP pixel clock is gated or the SSC modulator is
 * disabled. Otherwise inverts the divisor math of lpt_program_iclkip():
 * desired_divisor = (divsel + 2) * 64 + phaseinc, frequency =
 * root_freq / (desired_divisor << auxdiv).
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Pull the divisor and phase increment fields back out. */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
2070 
2071 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2072 					   enum pipe pch_transcoder)
2073 {
2074 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2075 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2076 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2077 
2078 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2079 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2080 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2081 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2082 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2083 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2084 
2085 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2086 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2087 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2088 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2089 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2090 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2091 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2092 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2093 }
2094 
2095 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2096 {
2097 	u32 temp;
2098 
2099 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2100 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2101 		return;
2102 
2103 	drm_WARN_ON(&dev_priv->drm,
2104 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2105 		    FDI_RX_ENABLE);
2106 	drm_WARN_ON(&dev_priv->drm,
2107 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2108 		    FDI_RX_ENABLE);
2109 
2110 	temp &= ~FDI_BC_BIFURCATION_SELECT;
2111 	if (enable)
2112 		temp |= FDI_BC_BIFURCATION_SELECT;
2113 
2114 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2115 		    enable ? "en" : "dis");
2116 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2117 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2118 }
2119 
2120 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2121 {
2122 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2123 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2124 
2125 	switch (crtc->pipe) {
2126 	case PIPE_A:
2127 		break;
2128 	case PIPE_B:
2129 		if (crtc_state->fdi_lanes > 2)
2130 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
2131 		else
2132 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
2133 
2134 		break;
2135 	case PIPE_C:
2136 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
2137 
2138 		break;
2139 	default:
2140 		BUG();
2141 	}
2142 }
2143 
2144 /*
2145  * Finds the encoder associated with the given CRTC. This can only be
2146  * used when we know that the CRTC isn't feeding multiple encoders!
2147  */
2148 struct intel_encoder *
2149 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2150 			   const struct intel_crtc_state *crtc_state)
2151 {
2152 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2153 	const struct drm_connector_state *connector_state;
2154 	const struct drm_connector *connector;
2155 	struct intel_encoder *encoder = NULL;
2156 	int num_encoders = 0;
2157 	int i;
2158 
2159 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2160 		if (connector_state->crtc != &crtc->base)
2161 			continue;
2162 
2163 		encoder = to_intel_encoder(connector_state->best_encoder);
2164 		num_encoders++;
2165 	}
2166 
2167 	drm_WARN(encoder->base.dev, num_encoders != 1,
2168 		 "%d encoders for pipe %c\n",
2169 		 num_encoders, pipe_name(crtc->pipe));
2170 
2171 	return encoder;
2172 }
2173 
2174 /*
2175  * Enable PCH resources required for PCH ports:
2176  *   - PCH PLLs
2177  *   - FDI training & RX/TX
2178  *   - update transcoder timings
2179  *   - DP transcoding bits
2180  *   - transcoder
2181  */
2182 static void ilk_pch_enable(const struct intel_atomic_state *state,
2183 			   const struct intel_crtc_state *crtc_state)
2184 {
2185 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2186 	struct drm_device *dev = crtc->base.dev;
2187 	struct drm_i915_private *dev_priv = to_i915(dev);
2188 	enum pipe pipe = crtc->pipe;
2189 	u32 temp;
2190 
2191 	assert_pch_transcoder_disabled(dev_priv, pipe);
2192 
2193 	if (IS_IVYBRIDGE(dev_priv))
2194 		ivb_update_fdi_bc_bifurcation(crtc_state);
2195 
2196 	/* Write the TU size bits before fdi link training, so that error
2197 	 * detection works. */
2198 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2199 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2200 
2201 	/* For PCH output, training FDI link */
2202 	dev_priv->display.fdi_link_train(crtc, crtc_state);
2203 
2204 	/* We need to program the right clock selection before writing the pixel
2205 	 * mutliplier into the DPLL. */
2206 	if (HAS_PCH_CPT(dev_priv)) {
2207 		u32 sel;
2208 
2209 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2210 		temp |= TRANS_DPLL_ENABLE(pipe);
2211 		sel = TRANS_DPLLB_SEL(pipe);
2212 		if (crtc_state->shared_dpll ==
2213 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2214 			temp |= sel;
2215 		else
2216 			temp &= ~sel;
2217 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2218 	}
2219 
2220 	/* XXX: pch pll's can be enabled any time before we enable the PCH
2221 	 * transcoder, and we actually should do this to not upset any PCH
2222 	 * transcoder that already use the clock when we share it.
2223 	 *
2224 	 * Note that enable_shared_dpll tries to do the right thing, but
2225 	 * get_shared_dpll unconditionally resets the pll - we need that to have
2226 	 * the right LVDS enable sequence. */
2227 	intel_enable_shared_dpll(crtc_state);
2228 
2229 	/* set transcoder timing, panel must allow it */
2230 	assert_panel_unlocked(dev_priv, pipe);
2231 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
2232 
2233 	intel_fdi_normal_train(crtc);
2234 
2235 	/* For PCH DP, enable TRANS_DP_CTL */
2236 	if (HAS_PCH_CPT(dev_priv) &&
2237 	    intel_crtc_has_dp_encoder(crtc_state)) {
2238 		const struct drm_display_mode *adjusted_mode =
2239 			&crtc_state->hw.adjusted_mode;
2240 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2241 		i915_reg_t reg = TRANS_DP_CTL(pipe);
2242 		enum port port;
2243 
2244 		temp = intel_de_read(dev_priv, reg);
2245 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2246 			  TRANS_DP_SYNC_MASK |
2247 			  TRANS_DP_BPC_MASK);
2248 		temp |= TRANS_DP_OUTPUT_ENABLE;
2249 		temp |= bpc << 9; /* same format but at 11:9 */
2250 
2251 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2252 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2253 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2254 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2255 
2256 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2257 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2258 		temp |= TRANS_DP_PORT_SEL(port);
2259 
2260 		intel_de_write(dev_priv, reg, temp);
2261 	}
2262 
2263 	ilk_enable_pch_transcoder(crtc_state);
2264 }
2265 
2266 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2267 {
2268 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2269 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2270 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2271 
2272 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2273 
2274 	lpt_program_iclkip(crtc_state);
2275 
2276 	/* Set transcoder timing. */
2277 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2278 
2279 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2280 }
2281 
2282 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2283 			       enum pipe pipe)
2284 {
2285 	i915_reg_t dslreg = PIPEDSL(pipe);
2286 	u32 temp;
2287 
2288 	temp = intel_de_read(dev_priv, dslreg);
2289 	udelay(500);
2290 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2291 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2292 			drm_err(&dev_priv->drm,
2293 				"mode set failed: pipe %c stuck\n",
2294 				pipe_name(pipe));
2295 	}
2296 }
2297 
2298 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2299 {
2300 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2301 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2302 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2303 	enum pipe pipe = crtc->pipe;
2304 	int width = drm_rect_width(dst);
2305 	int height = drm_rect_height(dst);
2306 	int x = dst->x1;
2307 	int y = dst->y1;
2308 
2309 	if (!crtc_state->pch_pfit.enabled)
2310 		return;
2311 
2312 	/* Force use of hard-coded filter coefficients
2313 	 * as some pre-programmed values are broken,
2314 	 * e.g. x201.
2315 	 */
2316 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2317 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2318 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2319 	else
2320 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2321 			       PF_FILTER_MED_3x3);
2322 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2323 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2324 }
2325 
/*
 * Enable IPS for this CRTC state. On Broadwell the request goes through
 * the pcode mailbox; otherwise IPS_CTL is written directly and we wait
 * for the enable bit to stick.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
2362 
/*
 * Disable IPS for this CRTC state. Broadwell goes through the pcode
 * mailbox and polls IPS_CTL for completion; otherwise IPS_CTL is
 * cleared directly. Ends with a vblank wait so a plane can then be
 * safely disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
2391 
2392 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
2393 {
2394 	if (intel_crtc->overlay)
2395 		(void) intel_overlay_switch_off(intel_crtc->overlay);
2396 
2397 	/* Let userspace switch the overlay on again. In most cases userspace
2398 	 * has to recompute where to put it anyway.
2399 	 */
2400 }
2401 
2402 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2403 				       const struct intel_crtc_state *new_crtc_state)
2404 {
2405 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2406 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2407 
2408 	if (!old_crtc_state->ips_enabled)
2409 		return false;
2410 
2411 	if (intel_crtc_needs_modeset(new_crtc_state))
2412 		return true;
2413 
2414 	/*
2415 	 * Workaround : Do not read or write the pipe palette/gamma data while
2416 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2417 	 *
2418 	 * Disable IPS before we program the LUT.
2419 	 */
2420 	if (IS_HASWELL(dev_priv) &&
2421 	    (new_crtc_state->uapi.color_mgmt_changed ||
2422 	     new_crtc_state->update_pipe) &&
2423 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2424 		return true;
2425 
2426 	return !new_crtc_state->ips_enabled;
2427 }
2428 
2429 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2430 				       const struct intel_crtc_state *new_crtc_state)
2431 {
2432 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2433 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2434 
2435 	if (!new_crtc_state->ips_enabled)
2436 		return false;
2437 
2438 	if (intel_crtc_needs_modeset(new_crtc_state))
2439 		return true;
2440 
2441 	/*
2442 	 * Workaround : Do not read or write the pipe palette/gamma data while
2443 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2444 	 *
2445 	 * Re-enable IPS after the LUT has been programmed.
2446 	 */
2447 	if (IS_HASWELL(dev_priv) &&
2448 	    (new_crtc_state->uapi.color_mgmt_changed ||
2449 	     new_crtc_state->update_pipe) &&
2450 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2451 		return true;
2452 
2453 	/*
2454 	 * We can't read out IPS on broadwell, assume the worst and
2455 	 * forcibly enable IPS on the first fastset.
2456 	 */
2457 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2458 		return true;
2459 
2460 	return !old_crtc_state->ips_enabled;
2461 }
2462 
2463 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2464 {
2465 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2466 
2467 	if (!crtc_state->nv12_planes)
2468 		return false;
2469 
2470 	/* WA Display #0827: Gen9:all */
2471 	if (IS_DISPLAY_VER(dev_priv, 9))
2472 		return true;
2473 
2474 	return false;
2475 }
2476 
2477 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2478 {
2479 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2480 
2481 	/* Wa_2006604312:icl,ehl */
2482 	if (crtc_state->scaler_state.scaler_users > 0 && IS_DISPLAY_VER(dev_priv, 11))
2483 		return true;
2484 
2485 	return false;
2486 }
2487 
2488 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2489 			    const struct intel_crtc_state *new_crtc_state)
2490 {
2491 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2492 		new_crtc_state->active_planes;
2493 }
2494 
2495 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2496 			     const struct intel_crtc_state *new_crtc_state)
2497 {
2498 	return old_crtc_state->active_planes &&
2499 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2500 }
2501 
/*
 * Per-CRTC tail of a plane update: runs after the new plane state has
 * been committed (and thus after a vblank wait). Enables IPS and turns
 * workarounds off once their trigger conditions have gone away.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	/* IPS may only go on after a plane is enabled and a vblank passed. */
	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* WA Display #0827: turn off once no NV12 planes remain. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl: turn off once no scaler users remain. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
2530 
2531 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2532 					struct intel_crtc *crtc)
2533 {
2534 	const struct intel_crtc_state *crtc_state =
2535 		intel_atomic_get_new_crtc_state(state, crtc);
2536 	u8 update_planes = crtc_state->update_planes;
2537 	const struct intel_plane_state *plane_state;
2538 	struct intel_plane *plane;
2539 	int i;
2540 
2541 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2542 		if (plane->enable_flip_done &&
2543 		    plane->pipe == crtc->pipe &&
2544 		    update_planes & BIT(plane->id))
2545 			plane->enable_flip_done(plane);
2546 	}
2547 }
2548 
2549 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2550 					 struct intel_crtc *crtc)
2551 {
2552 	const struct intel_crtc_state *crtc_state =
2553 		intel_atomic_get_new_crtc_state(state, crtc);
2554 	u8 update_planes = crtc_state->update_planes;
2555 	const struct intel_plane_state *plane_state;
2556 	struct intel_plane *plane;
2557 	int i;
2558 
2559 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2560 		if (plane->disable_flip_done &&
2561 		    plane->pipe == crtc->pipe &&
2562 		    update_planes & BIT(plane->id))
2563 			plane->disable_flip_done(plane);
2564 	}
2565 }
2566 
/*
 * WA: re-program each affected plane with its old state but with the
 * async flip bit cleared, then wait a vblank so the double-buffered bit
 * (see the caller's comment) has actually latched before the real
 * synchronous update goes out.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_wait_for_vblank(i915, crtc->pipe);
}
2598 
/*
 * Per-CRTC head of a plane update: runs before any of the new plane
 * state reaches the hardware. Handles IPS teardown, pre-update
 * workarounds, self-refresh/watermark ordering and the async-flip
 * disable workaround.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* Wait a vblank if the FBC pre-update asks for one. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_DISPLAY_VER(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
2692 
2693 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2694 				      struct intel_crtc *crtc)
2695 {
2696 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2697 	const struct intel_crtc_state *new_crtc_state =
2698 		intel_atomic_get_new_crtc_state(state, crtc);
2699 	unsigned int update_mask = new_crtc_state->update_planes;
2700 	const struct intel_plane_state *old_plane_state;
2701 	struct intel_plane *plane;
2702 	unsigned fb_bits = 0;
2703 	int i;
2704 
2705 	intel_crtc_dpms_overlay_disable(crtc);
2706 
2707 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2708 		if (crtc->pipe != plane->pipe ||
2709 		    !(update_mask & BIT(plane->id)))
2710 			continue;
2711 
2712 		intel_disable_plane(plane, new_crtc_state);
2713 
2714 		if (old_plane_state->uapi.visible)
2715 			fb_bits |= plane->frontbuffer_bit;
2716 	}
2717 
2718 	intel_frontbuffer_flip(dev_priv, fb_bits);
2719 }
2720 
2721 /*
2722  * intel_connector_primary_encoder - get the primary encoder for a connector
2723  * @connector: connector for which to return the encoder
2724  *
2725  * Returns the primary encoder for a connector. There is a 1:1 mapping from
2726  * all connectors to their encoder, except for DP-MST connectors which have
2727  * both a virtual and a primary encoder. These DP-MST primary encoders can be
2728  * pointed to by as many DP-MST connectors as there are pipes.
2729  */
2730 static struct intel_encoder *
2731 intel_connector_primary_encoder(struct intel_connector *connector)
2732 {
2733 	struct intel_encoder *encoder;
2734 
2735 	if (connector->mst_port)
2736 		return &dp_to_dig_port(connector->mst_port)->base;
2737 
2738 	encoder = intel_attached_encoder(connector);
2739 	drm_WARN_ON(connector->base.dev, !encoder);
2740 
2741 	return encoder;
2742 }
2743 
2744 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2745 {
2746 	struct drm_connector_state *new_conn_state;
2747 	struct drm_connector *connector;
2748 	int i;
2749 
2750 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2751 					i) {
2752 		struct intel_connector *intel_connector;
2753 		struct intel_encoder *encoder;
2754 		struct intel_crtc *crtc;
2755 
2756 		if (!intel_connector_needs_modeset(state, connector))
2757 			continue;
2758 
2759 		intel_connector = to_intel_connector(connector);
2760 		encoder = intel_connector_primary_encoder(intel_connector);
2761 		if (!encoder->update_prepare)
2762 			continue;
2763 
2764 		crtc = new_conn_state->crtc ?
2765 			to_intel_crtc(new_conn_state->crtc) : NULL;
2766 		encoder->update_prepare(state, encoder, crtc);
2767 	}
2768 }
2769 
2770 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2771 {
2772 	struct drm_connector_state *new_conn_state;
2773 	struct drm_connector *connector;
2774 	int i;
2775 
2776 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2777 					i) {
2778 		struct intel_connector *intel_connector;
2779 		struct intel_encoder *encoder;
2780 		struct intel_crtc *crtc;
2781 
2782 		if (!intel_connector_needs_modeset(state, connector))
2783 			continue;
2784 
2785 		intel_connector = to_intel_connector(connector);
2786 		encoder = intel_connector_primary_encoder(intel_connector);
2787 		if (!encoder->update_complete)
2788 			continue;
2789 
2790 		crtc = new_conn_state->crtc ?
2791 			to_intel_crtc(new_conn_state->crtc) : NULL;
2792 		encoder->update_complete(state, encoder, crtc);
2793 	}
2794 }
2795 
2796 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2797 					  struct intel_crtc *crtc)
2798 {
2799 	const struct intel_crtc_state *crtc_state =
2800 		intel_atomic_get_new_crtc_state(state, crtc);
2801 	const struct drm_connector_state *conn_state;
2802 	struct drm_connector *conn;
2803 	int i;
2804 
2805 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2806 		struct intel_encoder *encoder =
2807 			to_intel_encoder(conn_state->best_encoder);
2808 
2809 		if (conn_state->crtc != &crtc->base)
2810 			continue;
2811 
2812 		if (encoder->pre_pll_enable)
2813 			encoder->pre_pll_enable(state, encoder,
2814 						crtc_state, conn_state);
2815 	}
2816 }
2817 
2818 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2819 				      struct intel_crtc *crtc)
2820 {
2821 	const struct intel_crtc_state *crtc_state =
2822 		intel_atomic_get_new_crtc_state(state, crtc);
2823 	const struct drm_connector_state *conn_state;
2824 	struct drm_connector *conn;
2825 	int i;
2826 
2827 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2828 		struct intel_encoder *encoder =
2829 			to_intel_encoder(conn_state->best_encoder);
2830 
2831 		if (conn_state->crtc != &crtc->base)
2832 			continue;
2833 
2834 		if (encoder->pre_enable)
2835 			encoder->pre_enable(state, encoder,
2836 					    crtc_state, conn_state);
2837 	}
2838 }
2839 
2840 static void intel_encoders_enable(struct intel_atomic_state *state,
2841 				  struct intel_crtc *crtc)
2842 {
2843 	const struct intel_crtc_state *crtc_state =
2844 		intel_atomic_get_new_crtc_state(state, crtc);
2845 	const struct drm_connector_state *conn_state;
2846 	struct drm_connector *conn;
2847 	int i;
2848 
2849 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2850 		struct intel_encoder *encoder =
2851 			to_intel_encoder(conn_state->best_encoder);
2852 
2853 		if (conn_state->crtc != &crtc->base)
2854 			continue;
2855 
2856 		if (encoder->enable)
2857 			encoder->enable(state, encoder,
2858 					crtc_state, conn_state);
2859 		intel_opregion_notify_encoder(encoder, true);
2860 	}
2861 }
2862 
2863 static void intel_encoders_disable(struct intel_atomic_state *state,
2864 				   struct intel_crtc *crtc)
2865 {
2866 	const struct intel_crtc_state *old_crtc_state =
2867 		intel_atomic_get_old_crtc_state(state, crtc);
2868 	const struct drm_connector_state *old_conn_state;
2869 	struct drm_connector *conn;
2870 	int i;
2871 
2872 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2873 		struct intel_encoder *encoder =
2874 			to_intel_encoder(old_conn_state->best_encoder);
2875 
2876 		if (old_conn_state->crtc != &crtc->base)
2877 			continue;
2878 
2879 		intel_opregion_notify_encoder(encoder, false);
2880 		if (encoder->disable)
2881 			encoder->disable(state, encoder,
2882 					 old_crtc_state, old_conn_state);
2883 	}
2884 }
2885 
2886 static void intel_encoders_post_disable(struct intel_atomic_state *state,
2887 					struct intel_crtc *crtc)
2888 {
2889 	const struct intel_crtc_state *old_crtc_state =
2890 		intel_atomic_get_old_crtc_state(state, crtc);
2891 	const struct drm_connector_state *old_conn_state;
2892 	struct drm_connector *conn;
2893 	int i;
2894 
2895 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2896 		struct intel_encoder *encoder =
2897 			to_intel_encoder(old_conn_state->best_encoder);
2898 
2899 		if (old_conn_state->crtc != &crtc->base)
2900 			continue;
2901 
2902 		if (encoder->post_disable)
2903 			encoder->post_disable(state, encoder,
2904 					      old_crtc_state, old_conn_state);
2905 	}
2906 }
2907 
2908 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
2909 					    struct intel_crtc *crtc)
2910 {
2911 	const struct intel_crtc_state *old_crtc_state =
2912 		intel_atomic_get_old_crtc_state(state, crtc);
2913 	const struct drm_connector_state *old_conn_state;
2914 	struct drm_connector *conn;
2915 	int i;
2916 
2917 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2918 		struct intel_encoder *encoder =
2919 			to_intel_encoder(old_conn_state->best_encoder);
2920 
2921 		if (old_conn_state->crtc != &crtc->base)
2922 			continue;
2923 
2924 		if (encoder->post_pll_disable)
2925 			encoder->post_pll_disable(state, encoder,
2926 						  old_crtc_state, old_conn_state);
2927 	}
2928 }
2929 
2930 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
2931 				       struct intel_crtc *crtc)
2932 {
2933 	const struct intel_crtc_state *crtc_state =
2934 		intel_atomic_get_new_crtc_state(state, crtc);
2935 	const struct drm_connector_state *conn_state;
2936 	struct drm_connector *conn;
2937 	int i;
2938 
2939 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2940 		struct intel_encoder *encoder =
2941 			to_intel_encoder(conn_state->best_encoder);
2942 
2943 		if (conn_state->crtc != &crtc->base)
2944 			continue;
2945 
2946 		if (encoder->update_pipe)
2947 			encoder->update_pipe(state, encoder,
2948 					     crtc_state, conn_state);
2949 	}
2950 }
2951 
2952 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
2953 {
2954 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2955 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2956 
2957 	plane->disable_plane(plane, crtc_state);
2958 }
2959 
/*
 * Enable sequence for a pipe on ILK-style (PCH) hardware: suppress
 * spurious underruns, program timings/M-N/pipeconf, run the encoder
 * pre-enable hooks, set up FDI/pfit/LUTs/watermarks, then enable the
 * pipe, the PCH side and finally the encoders.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N values are only programmed when driving a PCH port. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	/* Mark the crtc active before the enable hooks run. */
	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm underrun reporting now that the pipe is up. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
3051 
3052 /* IPS only exists on ULT machines and is tied to pipe A. */
3053 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3054 {
3055 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3056 }
3057 
3058 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3059 					    enum pipe pipe, bool apply)
3060 {
3061 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3062 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3063 
3064 	if (apply)
3065 		val |= mask;
3066 	else
3067 		val &= ~mask;
3068 
3069 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3070 }
3071 
3072 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3073 {
3074 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3075 	enum pipe pipe = crtc->pipe;
3076 	u32 val;
3077 
3078 	val = MBUS_DBOX_A_CREDIT(2);
3079 
3080 	if (DISPLAY_VER(dev_priv) >= 12) {
3081 		val |= MBUS_DBOX_BW_CREDIT(2);
3082 		val |= MBUS_DBOX_B_CREDIT(12);
3083 	} else {
3084 		val |= MBUS_DBOX_BW_CREDIT(1);
3085 		val |= MBUS_DBOX_B_CREDIT(8);
3086 	}
3087 
3088 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3089 }
3090 
3091 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3092 {
3093 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3094 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3095 
3096 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3097 		       HSW_LINETIME(crtc_state->linetime) |
3098 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
3099 }
3100 
3101 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3102 {
3103 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3104 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3105 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3106 	u32 val;
3107 
3108 	val = intel_de_read(dev_priv, reg);
3109 	val &= ~HSW_FRAME_START_DELAY_MASK;
3110 	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3111 	intel_de_write(dev_priv, reg, val);
3112 }
3113 
/*
 * Bigjoiner pre-enable: enables VDSC on the master pipe, and for a
 * slave pipe first runs enable steps 1-7 on its master before turning
 * on DSC on the slave itself.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_crtc_state *master_crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	struct intel_encoder *encoder = NULL;
	int i;

	/* For a slave pipe the "master" is the linked crtc, not our own. */
	if (crtc_state->bigjoiner_slave)
		master = crtc_state->bigjoiner_linked_crtc;

	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);

	/* Find the encoder driving the master crtc in this atomic state. */
	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		if (conn_state->crtc != &master->base)
			continue;

		encoder = to_intel_encoder(conn_state->best_encoder);
		break;
	}

	/*
	 * NOTE(review): if no connector in @state targets the master crtc,
	 * @encoder remains NULL and is passed to intel_dsc_enable() below —
	 * presumably a bigjoiner modeset always carries such a connector;
	 * verify against the callers.
	 */
	if (!crtc_state->bigjoiner_slave) {
		/* need to enable VDSC, which we skipped in pre-enable */
		intel_dsc_enable(encoder, crtc_state);
	} else {
		/*
		 * Enable sequence steps 1-7 on bigjoiner master
		 */
		intel_encoders_pre_pll_enable(state, master);
		intel_enable_shared_dpll(master_crtc_state);
		intel_encoders_pre_enable(state, master);

		/* and DSC on slave */
		intel_dsc_enable(NULL, crtc_state);
	}
}
3152 
/*
 * Enable sequence for a pipe on HSW+ (DDI based) hardware: run the
 * pre-pll/pre-enable hooks (or the bigjoiner variant), program the
 * pipe/transcoder registers, scalers, LUTs and watermarks, enable the
 * encoders, and apply the platform workarounds.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		/* Bigjoiner has its own pre-enable ordering requirements. */
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Slave pipes and DSI transcoders skip the transcoder programming. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		hsw_set_pipeconf(new_crtc_state);

	/* Mark the crtc active before the enable hooks run. */
	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = IS_DISPLAY_VER(dev_priv, 10) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	/*
	 * NOTE(review): vblank is turned on here only for the bigjoiner
	 * slave — presumably the non-slave case is handled by the caller;
	 * verify.
	 */
	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* Undo WA #1180 after a full frame with the scaler enabled. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
3250 
3251 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3252 {
3253 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3254 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3255 	enum pipe pipe = crtc->pipe;
3256 
3257 	/* To avoid upsetting the power well on haswell only disable the pfit if
3258 	 * it's in use. The hw state code will make sure we get this right. */
3259 	if (!old_crtc_state->pch_pfit.enabled)
3260 		return;
3261 
3262 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
3263 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3264 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3265 }
3266 
/*
 * Disable sequence for a pipe on ILK-style (PCH) hardware: disable the
 * encoders, pipe, pfit and FDI, then tear down the PCH transcoder and
 * the FDI PLL.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	/* PCH-side teardown only applies when a PCH port was in use. */
	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-arm underrun reporting now that everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
3323 
/*
 * Disable a pipe on HSW+ hardware. Only the encoder disable and
 * post-disable hooks are invoked here — NOTE(review): the remaining
 * pipe/transcoder teardown presumably happens inside those hooks;
 * verify against the DDI encoder implementations.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
3334 
/*
 * Enable the GMCH panel fitter with the precomputed ratios and control
 * value from @crtc_state. No-op when the state doesn't use the pfit.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios are programmed before the control/enable register. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
3359 
3360 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3361 {
3362 	if (phy == PHY_NONE)
3363 		return false;
3364 	else if (IS_ALDERLAKE_S(dev_priv))
3365 		return phy <= PHY_E;
3366 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3367 		return phy <= PHY_D;
3368 	else if (IS_JSL_EHL(dev_priv))
3369 		return phy <= PHY_C;
3370 	else if (DISPLAY_VER(dev_priv) >= 11)
3371 		return phy <= PHY_B;
3372 	else
3373 		return false;
3374 }
3375 
3376 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3377 {
3378 	if (IS_TIGERLAKE(dev_priv))
3379 		return phy >= PHY_D && phy <= PHY_I;
3380 	else if (IS_ICELAKE(dev_priv))
3381 		return phy >= PHY_C && phy <= PHY_F;
3382 	else
3383 		return false;
3384 }
3385 
3386 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3387 {
3388 	if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3389 		return PHY_B + port - PORT_TC1;
3390 	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3391 		return PHY_C + port - PORT_TC1;
3392 	else if (IS_JSL_EHL(i915) && port == PORT_D)
3393 		return PHY_A;
3394 
3395 	return PHY_A + port - PORT_A;
3396 }
3397 
3398 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3399 {
3400 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3401 		return TC_PORT_NONE;
3402 
3403 	if (DISPLAY_VER(dev_priv) >= 12)
3404 		return TC_PORT_1 + port - PORT_TC1;
3405 	else
3406 		return TC_PORT_1 + port - PORT_C;
3407 }
3408 
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are flagged via MISSING_CASE() and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
3435 
3436 enum intel_display_power_domain
3437 intel_aux_power_domain(struct intel_digital_port *dig_port)
3438 {
3439 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3440 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
3441 
3442 	if (intel_phy_is_tc(dev_priv, phy) &&
3443 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
3444 		switch (dig_port->aux_ch) {
3445 		case AUX_CH_C:
3446 			return POWER_DOMAIN_AUX_C_TBT;
3447 		case AUX_CH_D:
3448 			return POWER_DOMAIN_AUX_D_TBT;
3449 		case AUX_CH_E:
3450 			return POWER_DOMAIN_AUX_E_TBT;
3451 		case AUX_CH_F:
3452 			return POWER_DOMAIN_AUX_F_TBT;
3453 		case AUX_CH_G:
3454 			return POWER_DOMAIN_AUX_G_TBT;
3455 		case AUX_CH_H:
3456 			return POWER_DOMAIN_AUX_H_TBT;
3457 		case AUX_CH_I:
3458 			return POWER_DOMAIN_AUX_I_TBT;
3459 		default:
3460 			MISSING_CASE(dig_port->aux_ch);
3461 			return POWER_DOMAIN_AUX_C_TBT;
3462 		}
3463 	}
3464 
3465 	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
3466 }
3467 
3468 /*
3469  * Converts aux_ch to power_domain without caring about TBT ports for that use
3470  * intel_aux_power_domain()
3471  */
3472 enum intel_display_power_domain
3473 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3474 {
3475 	switch (aux_ch) {
3476 	case AUX_CH_A:
3477 		return POWER_DOMAIN_AUX_A;
3478 	case AUX_CH_B:
3479 		return POWER_DOMAIN_AUX_B;
3480 	case AUX_CH_C:
3481 		return POWER_DOMAIN_AUX_C;
3482 	case AUX_CH_D:
3483 		return POWER_DOMAIN_AUX_D;
3484 	case AUX_CH_E:
3485 		return POWER_DOMAIN_AUX_E;
3486 	case AUX_CH_F:
3487 		return POWER_DOMAIN_AUX_F;
3488 	case AUX_CH_G:
3489 		return POWER_DOMAIN_AUX_G;
3490 	case AUX_CH_H:
3491 		return POWER_DOMAIN_AUX_H;
3492 	case AUX_CH_I:
3493 		return POWER_DOMAIN_AUX_I;
3494 	default:
3495 		MISSING_CASE(aux_ch);
3496 		return POWER_DOMAIN_AUX_A;
3497 	}
3498 }
3499 
3500 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3501 {
3502 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3503 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3504 	struct drm_encoder *encoder;
3505 	enum pipe pipe = crtc->pipe;
3506 	u64 mask;
3507 	enum transcoder transcoder = crtc_state->cpu_transcoder;
3508 
3509 	if (!crtc_state->hw.active)
3510 		return 0;
3511 
3512 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3513 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3514 	if (crtc_state->pch_pfit.enabled ||
3515 	    crtc_state->pch_pfit.force_thru)
3516 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3517 
3518 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3519 				  crtc_state->uapi.encoder_mask) {
3520 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3521 
3522 		mask |= BIT_ULL(intel_encoder->power_domain);
3523 	}
3524 
3525 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3526 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
3527 
3528 	if (crtc_state->shared_dpll)
3529 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3530 
3531 	if (crtc_state->dsc.compression_enable)
3532 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3533 
3534 	return mask;
3535 }
3536 
3537 static u64
3538 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3539 {
3540 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3541 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3542 	enum intel_display_power_domain domain;
3543 	u64 domains, new_domains, old_domains;
3544 
3545 	domains = get_crtc_power_domains(crtc_state);
3546 
3547 	new_domains = domains & ~crtc->enabled_power_domains.mask;
3548 	old_domains = crtc->enabled_power_domains.mask & ~domains;
3549 
3550 	for_each_power_domain(domain, new_domains)
3551 		intel_display_power_get_in_set(dev_priv,
3552 					       &crtc->enabled_power_domains,
3553 					       domain);
3554 
3555 	return old_domains;
3556 }
3557 
3558 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3559 					   u64 domains)
3560 {
3561 	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3562 					    &crtc->enabled_power_domains,
3563 					    domains);
3564 }
3565 
/*
 * Enable sequence for a pipe on VLV/CHV: program timings and pipeconf,
 * enable the DPLL via the platform-specific hooks, then pfit, LUTs,
 * watermarks, the pipe and finally the encoders.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	/* Mark the crtc active before the enable hooks run. */
	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	/*
	 * NOTE(review): unlike i9xx_crtc_enable(), ->initial_watermarks is
	 * called without a NULL check here — presumably the hook is always
	 * set on VLV/CHV; verify.
	 */
	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
3620 
3621 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
3622 {
3623 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3624 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3625 
3626 	intel_de_write(dev_priv, FP0(crtc->pipe),
3627 		       crtc_state->dpll_hw_state.fp0);
3628 	intel_de_write(dev_priv, FP1(crtc->pipe),
3629 		       crtc_state->dpll_hw_state.fp1);
3630 }
3631 
/*
 * Enable sequence for a pipe on gen2-4 (GMCH) hardware: program
 * dividers, timings and pipeconf, enable the DPLL, pfit, LUTs and
 * watermarks, then the pipe and the encoders.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	/* Mark the crtc active before the enable hooks run. */
	crtc->active = true;

	/* Gen2 has no underrun reporting. */
	if (!IS_DISPLAY_VER(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (IS_DISPLAY_VER(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);
}
3683 
3684 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3685 {
3686 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3687 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3688 
3689 	if (!old_crtc_state->gmch_pfit.control)
3690 		return;
3691 
3692 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3693 
3694 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
3695 		    intel_de_read(dev_priv, PFIT_CONTROL));
3696 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
3697 }
3698 
/*
 * Disable sequence for a pipe on gen2-4 (GMCH) hardware: disable
 * encoders, pipe, pfit and then the DPLL (except for DSI outputs,
 * which own the PLL).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_DISPLAY_VER(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI outputs manage the PLL themselves; leave it alone here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no underrun reporting to turn off. */
	if (!IS_DISPLAY_VER(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
3745 
/*
 * Force a crtc off outside of a normal atomic commit, e.g. to sanitize
 * bogus BIOS state during hw state readout. Disables the planes and the
 * crtc in hardware, then scrubs every piece of software state that an
 * atomic commit would normally maintain (uapi/hw crtc state, encoder
 * links, cdclk/dbuf/bw bookkeeping, power domains).
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Planes must be off before the pipe itself can be disabled. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * The crtc_disable() hook wants an atomic state; build a throwaway
	 * one just to carry the old crtc state and affected connectors.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Clear the uapi-visible crtc state: no mode, inactive, unlinked. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach any encoders that were still pointing at this crtc. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Scrub the global-state bookkeeping for this pipe. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
3830 
3831 /*
3832  * turn all crtc's off, but do not adjust state
3833  * This has to be paired with a call to intel_modeset_setup_hw_state.
3834  */
3835 int intel_display_suspend(struct drm_device *dev)
3836 {
3837 	struct drm_i915_private *dev_priv = to_i915(dev);
3838 	struct drm_atomic_state *state;
3839 	int ret;
3840 
3841 	state = drm_atomic_helper_suspend(dev);
3842 	ret = PTR_ERR_OR_ZERO(state);
3843 	if (ret)
3844 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
3845 			ret);
3846 	else
3847 		dev_priv->modeset_restore_state = state;
3848 	return ret;
3849 }
3850 
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Tear down the drm core encoder state, then free our wrapper. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
3858 
3859 /* Cross check the actual hw state with our own modeset state tracking (and it's
3860  * internal consistency). */
/*
 * Sanity-check one connector's software state against the hardware:
 * an enabled connector must have an active attached crtc, and the
 * atomic connector state must agree with the encoder/crtc linkage.
 * Emits I915_STATE_WARNs on mismatch; never modifies state.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors share encoders; skip the 1:1 linkage checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
3897 
3898 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
3899 {
3900 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3901 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3902 
3903 	/* IPS only exists on ULT machines and is tied to pipe A. */
3904 	if (!hsw_crtc_supports_ips(crtc))
3905 		return false;
3906 
3907 	if (!dev_priv->params.enable_ips)
3908 		return false;
3909 
3910 	if (crtc_state->pipe_bpp > 24)
3911 		return false;
3912 
3913 	/*
3914 	 * We compare against max which means we must take
3915 	 * the increased cdclk requirement into account when
3916 	 * calculating the new cdclk.
3917 	 *
3918 	 * Should measure whether using a lower cdclk w/o IPS
3919 	 */
3920 	if (IS_BROADWELL(dev_priv) &&
3921 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
3922 		return false;
3923 
3924 	return true;
3925 }
3926 
/*
 * Decide whether IPS (Intermediate Pixel Storage) should be enabled for
 * this crtc state. Starts from "disabled" and only flips it on when every
 * constraint is satisfied. Returns 0 on success or a negative error from
 * the cdclk state lookup.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		/* May add the cdclk state to @state; can fail with -EDEADLK. */
		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
3968 
3969 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
3970 {
3971 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3972 
3973 	/* GDG double wide on either pipe, otherwise pipe A only */
3974 	return DISPLAY_VER(dev_priv) < 4 &&
3975 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
3976 }
3977 
3978 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
3979 {
3980 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
3981 	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
3982 
3983 	/*
3984 	 * We only use IF-ID interlacing. If we ever use
3985 	 * PF-ID we'll need to adjust the pixel_rate here.
3986 	 */
3987 
3988 	if (!crtc_state->pch_pfit.enabled)
3989 		return pixel_rate;
3990 
3991 	pipe_w = crtc_state->pipe_src_w;
3992 	pipe_h = crtc_state->pipe_src_h;
3993 
3994 	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
3995 	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
3996 
3997 	if (pipe_w < pfit_w)
3998 		pipe_w = pfit_w;
3999 	if (pipe_h < pfit_h)
4000 		pipe_h = pfit_h;
4001 
4002 	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
4003 			!pfit_w || !pfit_h))
4004 		return pixel_rate;
4005 
4006 	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
4007 		       pfit_w * pfit_h);
4008 }
4009 
4010 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4011 					 const struct drm_display_mode *timings)
4012 {
4013 	mode->hdisplay = timings->crtc_hdisplay;
4014 	mode->htotal = timings->crtc_htotal;
4015 	mode->hsync_start = timings->crtc_hsync_start;
4016 	mode->hsync_end = timings->crtc_hsync_end;
4017 
4018 	mode->vdisplay = timings->crtc_vdisplay;
4019 	mode->vtotal = timings->crtc_vtotal;
4020 	mode->vsync_start = timings->crtc_vsync_start;
4021 	mode->vsync_end = timings->crtc_vsync_end;
4022 
4023 	mode->flags = timings->flags;
4024 	mode->type = DRM_MODE_TYPE_DRIVER;
4025 
4026 	mode->clock = timings->crtc_clock;
4027 
4028 	drm_mode_set_name(mode);
4029 }
4030 
4031 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4032 {
4033 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4034 
4035 	if (HAS_GMCH(dev_priv))
4036 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
4037 		crtc_state->pixel_rate =
4038 			crtc_state->hw.pipe_mode.crtc_clock;
4039 	else
4040 		crtc_state->pixel_rate =
4041 			ilk_pipe_pixel_rate(crtc_state);
4042 }
4043 
/*
 * Derive the remaining software timing state (hw.pipe_mode, hw.mode,
 * pixel_rate) from the hw.adjusted_mode that was just read out of the
 * hardware, undoing/applying bigjoiner halving and eDP MSO splitting.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		/*
		 * With MSO the adjusted_mode (full mode) is derived from the
		 * expanded pipe_mode, not the segment timings read out above.
		 */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	/* uapi mode: full width across both bigjoiner pipes if in use. */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
4097 
4098 static void intel_encoder_get_config(struct intel_encoder *encoder,
4099 				     struct intel_crtc_state *crtc_state)
4100 {
4101 	encoder->get_config(encoder, crtc_state);
4102 
4103 	intel_crtc_readout_derived_state(crtc_state);
4104 }
4105 
/*
 * Compute and validate the per-crtc portions of @pipe_config:
 * builds hw.pipe_mode from hw.adjusted_mode (applying bigjoiner and
 * eDP MSO adjustments), decides double wide mode, validates the dot
 * clock and pipe source width, and computes the pixel rate. Returns
 * 0 on success or -EINVAL for an unsupportable configuration.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	/* eDP MSO: expand segment timings to the full mode for the pipe. */
	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		/* Pre-gen4: dotclock is limited to 90% of (display) cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders need the FDI link configured/validated as well. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4199 
4200 static void
4201 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4202 {
4203 	while (*num > DATA_LINK_M_N_MASK ||
4204 	       *den > DATA_LINK_M_N_MASK) {
4205 		*num >>= 1;
4206 		*den >>= 1;
4207 	}
4208 }
4209 
4210 static void compute_m_n(unsigned int m, unsigned int n,
4211 			u32 *ret_m, u32 *ret_n,
4212 			bool constant_n)
4213 {
4214 	/*
4215 	 * Several DP dongles in particular seem to be fussy about
4216 	 * too large link M/N values. Give N value as 0x8000 that
4217 	 * should be acceptable by specific devices. 0x8000 is the
4218 	 * specified fixed N value for asynchronous clock mode,
4219 	 * which the devices expect also in synchronous clock mode.
4220 	 */
4221 	if (constant_n)
4222 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
4223 	else
4224 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4225 
4226 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4227 	intel_reduce_m_n_ratio(ret_m, ret_n);
4228 }
4229 
4230 void
4231 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4232 		       int pixel_clock, int link_clock,
4233 		       struct intel_link_m_n *m_n,
4234 		       bool constant_n, bool fec_enable)
4235 {
4236 	u32 data_clock = bits_per_pixel * pixel_clock;
4237 
4238 	if (fec_enable)
4239 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
4240 
4241 	m_n->tu = 64;
4242 	compute_m_n(data_clock,
4243 		    link_clock * nlanes * 8,
4244 		    &m_n->gmch_m, &m_n->gmch_n,
4245 		    constant_n);
4246 
4247 	compute_m_n(pixel_clock, link_clock,
4248 		    &m_n->link_m, &m_n->link_n,
4249 		    constant_n);
4250 }
4251 
4252 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4253 {
4254 	/*
4255 	 * There may be no VBT; and if the BIOS enabled SSC we can
4256 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
4257 	 * BIOS isn't using it, don't assume it will work even if the VBT
4258 	 * indicates as much.
4259 	 */
4260 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4261 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4262 						       PCH_DREF_CONTROL) &
4263 			DREF_SSC1_ENABLE;
4264 
4265 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4266 			drm_dbg_kms(&dev_priv->drm,
4267 				    "SSC %s by BIOS, overriding VBT which says %s\n",
4268 				    enableddisabled(bios_lvds_use_ssc),
4269 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
4270 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4271 		}
4272 	}
4273 }
4274 
4275 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4276 					 const struct intel_link_m_n *m_n)
4277 {
4278 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4279 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4280 	enum pipe pipe = crtc->pipe;
4281 
4282 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4283 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
4284 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4285 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4286 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4287 }
4288 
4289 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4290 				 enum transcoder transcoder)
4291 {
4292 	if (IS_HASWELL(dev_priv))
4293 		return transcoder == TRANSCODER_EDP;
4294 
4295 	/*
4296 	 * Strictly speaking some registers are available before
4297 	 * gen7, but we only support DRRS on gen7+
4298 	 */
4299 	return IS_DISPLAY_VER(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
4300 }
4301 
/*
 * Program the CPU transcoder's data/link M/N dividers. On ilk+ the
 * registers are indexed by transcoder (with an optional second M2/N2
 * set used for DRRS); on g4x and earlier they are indexed by pipe.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* g4x and earlier: per-pipe register layout. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
4343 
/*
 * Program the DP M/N dividers for @crtc_state, selecting which
 * precomputed divider set (M1_N1 or M2_N2) should be written into
 * the hardware's primary registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always writes dp_m_n and ignores the
	 * @m_n selection made above — presumably PCH transcoders never need
	 * the M2/N2 fallback; confirm against DRRS support on PCH platforms.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
4369 
/*
 * Program the transcoder's H/V timing registers from hw.adjusted_mode.
 * Each register packs (start - 1) in the low 16 bits and (end - 1) in
 * the high 16 bits.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
		               vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
4427 
4428 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4429 {
4430 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4431 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4432 	enum pipe pipe = crtc->pipe;
4433 
4434 	/* pipesrc controls the size that is scaled from, which should
4435 	 * always be the user's requested size.
4436 	 */
4437 	intel_de_write(dev_priv, PIPESRC(pipe),
4438 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4439 }
4440 
4441 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4442 {
4443 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4444 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4445 
4446 	if (IS_DISPLAY_VER(dev_priv, 2))
4447 		return false;
4448 
4449 	if (DISPLAY_VER(dev_priv) >= 9 ||
4450 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4451 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4452 	else
4453 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4454 }
4455 
/*
 * Read the transcoder timing registers back into hw.adjusted_mode.
 * The registers pack (value - 1) fields: start in bits 15:0, end in
 * bits 31:16 — the inverse of intel_set_transcoder_timings().
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	/* Blank timings are not read for DSI transcoders. */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the halfline adjustment made when programming interlace. */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
4500 
4501 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4502 				    struct intel_crtc_state *pipe_config)
4503 {
4504 	struct drm_device *dev = crtc->base.dev;
4505 	struct drm_i915_private *dev_priv = to_i915(dev);
4506 	u32 tmp;
4507 
4508 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4509 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4510 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4511 }
4512 
/*
 * Assemble and write the PIPECONF register for gmch platforms:
 * double wide mode, bpc/dither (g4x+), interlace mode, color range
 * (vlv/chv), gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3- and SDVO outputs need the field indication variant. */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* Limited (16-235) color range selection only exists on vlv/chv. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
4573 
4574 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4575 {
4576 	if (IS_I830(dev_priv))
4577 		return false;
4578 
4579 	return DISPLAY_VER(dev_priv) >= 4 ||
4580 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4581 }
4582 
/*
 * Read out the GMCH panel fitter state into @crtc_state, but only when
 * the pfit exists, is enabled, and is actually attached to this pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	/* Platforms without a GMCH pfit have nothing to read out. */
	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		/* Pre-gen4 only reports a pfit on pipe B here. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* Gen4+: PFIT_CONTROL carries an explicit pipe select field. */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
4609 
/*
 * Compute the VLV port clock from the DPLL divider state read back via
 * the DPIO sideband, storing the result in @pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000; /* fixed reference used for the readback calculation */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* All dividers live in one DPIO dword; read it under the DPIO lock. */
	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the m1/m2/n/p1/p2 divider fields from the dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
4636 
/*
 * Compute the CHV port clock from the DPLL divider state read back via
 * the DPIO sideband, storing the result in @pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* fixed reference used for the readback calculation */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Grab all the divider dwords in one DPIO critical section. */
	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	/* m1 is either a fixed /2 or unused (0). */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/*
	 * m2 is a 10.22 style value: integer part from DW0 shifted up,
	 * 22-bit fractional part OR'd in only when fractional divide is on.
	 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
4670 
4671 static enum intel_output_format
4672 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4673 {
4674 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4675 	u32 tmp;
4676 
4677 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4678 
4679 	if (tmp & PIPEMISC_YUV420_ENABLE) {
4680 		/* We support 4:2:0 in full blend mode only */
4681 		drm_WARN_ON(&dev_priv->drm,
4682 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4683 
4684 		return INTEL_OUTPUT_FORMAT_YCBCR420;
4685 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4686 		return INTEL_OUTPUT_FORMAT_YCBCR444;
4687 	} else {
4688 		return INTEL_OUTPUT_FORMAT_RGB;
4689 	}
4690 }
4691 
4692 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4693 {
4694 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4695 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4696 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4697 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4698 	u32 tmp;
4699 
4700 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4701 
4702 	if (tmp & DISPPLANE_GAMMA_ENABLE)
4703 		crtc_state->gamma_enable = true;
4704 
4705 	if (!HAS_GMCH(dev_priv) &&
4706 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
4707 		crtc_state->csc_enable = true;
4708 }
4709 
/*
 * Read out the full hardware state of a gen2-4/VLV/CHV pipe into
 * @pipe_config. Returns true if the pipe is active (PIPECONF enabled),
 * false if it is off or its power domain is down.
 *
 * The pipe's power domain is grabbed for the duration of the readout
 * and released on all exit paths via the "out" label.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* Defaults that the readout below may override. */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x/VLV/CHV encode the pipe bpc in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	/* Double-wide mode only exists pre-gen4. */
	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* Pixel multiplier readout differs wildly per generation. */
	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/* Legacy platforms also have the FP0/FP1 divider registers. */
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4831 
/*
 * Program PCH_DREF_CONTROL (the PCH display reference clock) for
 * ILK/SNB/IVB-class PCHs, based on which encoders are present and
 * whether spread spectrum (SSC) should be used. The register must be
 * reprogrammed in careful steps with settling delays between writes.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Port A eDP is driven from the CPU side. */
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX the external CK505 clock presence comes from the VBT. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but an active DPLL still relies on SSC. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware is already in the wanted state. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		/* SSC stays on while any DPLL still uses it. */
		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The step-wise writes above must have converged on "final". */
	BUG_ON(val != final);
}
5000 
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset bit,
 * wait for the status bit to confirm, then de-assert and wait again.
 * Timeouts are reported but not treated as fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	/* Wait up to 100us for the reset to be reflected in the status bit. */
	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
5021 
5022 /* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	/*
	 * Fixed read-modify-write sequence over the mPHY sideband (SBI)
	 * registers, as dictated by the WaMPhyProgramming:hsw workaround.
	 * The 0x20xx/0x21xx pairs below are the two mPHY banks, which are
	 * programmed identically. The offsets and values are opaque
	 * workaround data; do not "clean up" or reorder them.
	 */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
5096 
5097 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5098  * Programming" based on the parameters passed:
5099  * - Sequence to enable CLKOUT_DP
5100  * - Sequence to enable CLKOUT_DP without spread
5101  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5102  */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible parameter combinations (see header comment). */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* All SBI accesses must be serialized under the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep the clock path gated (PATHALT). */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	/* Settling delay before opening the clock path. */
	udelay(24);

	if (with_spread) {
		/* Ungate the clock path to get the spread clock out. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable override lives in a different register on LP PCHs. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5142 
5143 /* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	/* All SBI accesses must be serialized under the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* The buffer-enable override lives in a different register on LP PCHs. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Gate the clock path (PATHALT) before fully disabling SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
5168 
/* Map a bend amount in steps of 5 (range -50..50) to an array index 0..20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE low-16-bit values for each supported clock bend;
 * indexed via BEND_IDX(). See lpt_bend_clkout_dp() below for the units
 * and sign convention of "steps".
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
5194 
5195 /*
5196  * Bend CLKOUT_DP
5197  * steps -50 to 50 inclusive, in steps of 5
5198  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5199  * change in clock period = -(steps / 10) * 5.787 ps
5200  */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 have table entries. */
	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	/*
	 * Also rejects steps < -50: a negative idx converts to a huge
	 * unsigned value in this comparison (ARRAY_SIZE is size_t).
	 */
	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Dither phase: only needed for odd multiples of 5. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Replace the low 16 bits with the table value for this bend. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5227 
5228 #undef BEND_IDX
5229 
5230 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5231 {
5232 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5233 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5234 
5235 	if ((ctl & SPLL_PLL_ENABLE) == 0)
5236 		return false;
5237 
5238 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5239 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5240 		return true;
5241 
5242 	if (IS_BROADWELL(dev_priv) &&
5243 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5244 		return true;
5245 
5246 	return false;
5247 }
5248 
5249 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5250 			       enum intel_dpll_id id)
5251 {
5252 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5253 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5254 
5255 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
5256 		return false;
5257 
5258 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5259 		return true;
5260 
5261 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5262 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5263 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5264 		return true;
5265 
5266 	return false;
5267 }
5268 
5269 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5270 {
5271 	struct intel_encoder *encoder;
5272 	bool has_fdi = false;
5273 
5274 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5275 		switch (encoder->type) {
5276 		case INTEL_OUTPUT_ANALOG:
5277 			has_fdi = true;
5278 			break;
5279 		default:
5280 			break;
5281 		}
5282 	}
5283 
5284 	/*
5285 	 * The BIOS may have decided to use the PCH SSC
5286 	 * reference so we must not disable it until the
5287 	 * relevant PLLs have stopped relying on it. We'll
5288 	 * just leave the PCH SSC reference enabled in case
5289 	 * any active PLL is using it. It will get disabled
5290 	 * after runtime suspend if we don't have FDI.
5291 	 *
5292 	 * TODO: Move the whole reference clock handling
5293 	 * to the modeset sequence proper so that we can
5294 	 * actually enable/disable/reconfigure these things
5295 	 * safely. To do that we need to introduce a real
5296 	 * clock hierarchy. That would also allow us to do
5297 	 * clock bending finally.
5298 	 */
5299 	dev_priv->pch_ssc_use = 0;
5300 
5301 	if (spll_uses_pch_ssc(dev_priv)) {
5302 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5303 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5304 	}
5305 
5306 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5307 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5308 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5309 	}
5310 
5311 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5312 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5313 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5314 	}
5315 
5316 	if (dev_priv->pch_ssc_use)
5317 		return;
5318 
5319 	if (has_fdi) {
5320 		lpt_bend_clkout_dp(dev_priv, 0);
5321 		lpt_enable_clkout_dp(dev_priv, true, true);
5322 	} else {
5323 		lpt_disable_clkout_dp(dev_priv);
5324 	}
5325 }
5326 
5327 /*
5328  * Initialize reference clocks when the driver loads
5329  */
/*
 * Initialize reference clocks when the driver loads: IBX/CPT PCHs use
 * the ILK scheme, LPT has its own; other PCH types need nothing here.
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv)) {
		lpt_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
}
5337 
/* Assemble and write PIPECONF for ILK/SNB/IVB-class pipes. */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* SDVO gets limited range applied elsewhere, not via PIPECONF. */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* Hardware stores the frame start delay minus one. */
	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
5394 
5395 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5396 {
5397 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5398 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5399 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5400 	u32 val = 0;
5401 
5402 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
5403 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5404 
5405 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5406 		val |= PIPECONF_INTERLACED_ILK;
5407 	else
5408 		val |= PIPECONF_PROGRESSIVE;
5409 
5410 	if (IS_HASWELL(dev_priv) &&
5411 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5412 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5413 
5414 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5415 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5416 }
5417 
/* Assemble and write PIPEMISC (dithering, colorspace, HDR bits) for BDW+. */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 is only supported in full blend mode (see readout above). */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/* HDR precision mode when only HDR-capable planes (+cursor) are on. */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
5463 
5464 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5465 {
5466 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5467 	u32 tmp;
5468 
5469 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5470 
5471 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
5472 	case PIPEMISC_DITHER_6_BPC:
5473 		return 18;
5474 	case PIPEMISC_DITHER_8_BPC:
5475 		return 24;
5476 	case PIPEMISC_DITHER_10_BPC:
5477 		return 30;
5478 	case PIPEMISC_DITHER_12_BPC:
5479 		return 36;
5480 	default:
5481 		MISSING_CASE(tmp);
5482 		return 0;
5483 	}
5484 }
5485 
/*
 * ilk_get_lanes_required - minimum FDI lane count for a mode
 * @target_clock: pixel clock
 * @link_bw: per-lane link clock (same units as @target_clock)
 * @bpp: bits per pixel
 *
 * Account for spread spectrum to avoid
 * oversubscribing the link. Max center spread
 * is 2.5%; use 5% for safety's sake.
 */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/* Required bandwidth including the 5% spread-spectrum margin. */
	unsigned int bps = target_clock * bpp * 21 / 20;
	/* Each lane carries 8 bits per link clock. */
	unsigned int lane_bw = link_bw * 8;

	/* Round up: a partially used lane must still be enabled. */
	return (bps + lane_bw - 1) / lane_bw;
}
5496 
5497 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5498 					 struct intel_link_m_n *m_n)
5499 {
5500 	struct drm_device *dev = crtc->base.dev;
5501 	struct drm_i915_private *dev_priv = to_i915(dev);
5502 	enum pipe pipe = crtc->pipe;
5503 
5504 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5505 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5506 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5507 		& ~TU_SIZE_MASK;
5508 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
5509 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5510 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5511 }
5512 
/*
 * Read back the link/data M/N values from the CPU transcoder into @m_n
 * (and @m2_n2 for the second set, when non-NULL and supported). Gen5+
 * uses per-transcoder registers; older platforms use per-pipe ones.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		/* DATA_M1 also carries the TU size in its high bits. */
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* TU size is stored minus one in the register. */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n =	intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m =	intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n =	intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5 (g4x): M/N registers are addressed by pipe. */
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
5557 
5558 void intel_dp_get_m_n(struct intel_crtc *crtc,
5559 		      struct intel_crtc_state *pipe_config)
5560 {
5561 	if (pipe_config->has_pch_encoder)
5562 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5563 	else
5564 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5565 					     &pipe_config->dp_m_n,
5566 					     &pipe_config->dp_m2_n2);
5567 }
5568 
/* Read out the FDI M/N values; FDI has no second (M2/N2) set. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
5575 
5576 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5577 				  u32 pos, u32 size)
5578 {
5579 	drm_rect_init(&crtc_state->pch_pfit.dst,
5580 		      pos >> 16, pos & 0xffff,
5581 		      size >> 16, size & 0xffff);
5582 }
5583 
/*
 * Read out the SKL+ pipe scaler ("panel fitter") state for the pipe.
 * Scans this pipe's scalers for one that is enabled and bound to the
 * pipe itself (not to a plane), records its window into pch_pfit, and
 * updates the scaler tracking state accordingly.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* must be enabled with no plane selected (i.e. pipe scaling) */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		/* only the first matching scaler is used */
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
5618 
/*
 * Read out the ILK-style panel fitter state for the pipe. If the
 * fitter is enabled, its window position/size is recorded into the
 * crtc state's pch_pfit rectangle.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_DISPLAY_VER(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
5644 
5645 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5646 				struct intel_crtc_state *pipe_config)
5647 {
5648 	struct drm_device *dev = crtc->base.dev;
5649 	struct drm_i915_private *dev_priv = to_i915(dev);
5650 	enum intel_display_power_domain power_domain;
5651 	intel_wakeref_t wakeref;
5652 	u32 tmp;
5653 	bool ret;
5654 
5655 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5656 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5657 	if (!wakeref)
5658 		return false;
5659 
5660 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5661 	pipe_config->shared_dpll = NULL;
5662 
5663 	ret = false;
5664 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5665 	if (!(tmp & PIPECONF_ENABLE))
5666 		goto out;
5667 
5668 	switch (tmp & PIPECONF_BPC_MASK) {
5669 	case PIPECONF_6BPC:
5670 		pipe_config->pipe_bpp = 18;
5671 		break;
5672 	case PIPECONF_8BPC:
5673 		pipe_config->pipe_bpp = 24;
5674 		break;
5675 	case PIPECONF_10BPC:
5676 		pipe_config->pipe_bpp = 30;
5677 		break;
5678 	case PIPECONF_12BPC:
5679 		pipe_config->pipe_bpp = 36;
5680 		break;
5681 	default:
5682 		break;
5683 	}
5684 
5685 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5686 		pipe_config->limited_color_range = true;
5687 
5688 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5689 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5690 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5691 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5692 		break;
5693 	default:
5694 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5695 		break;
5696 	}
5697 
5698 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5699 		PIPECONF_GAMMA_MODE_SHIFT;
5700 
5701 	pipe_config->csc_mode = intel_de_read(dev_priv,
5702 					      PIPE_CSC_MODE(crtc->pipe));
5703 
5704 	i9xx_get_pipe_color_config(pipe_config);
5705 	intel_color_get_config(pipe_config);
5706 
5707 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5708 		struct intel_shared_dpll *pll;
5709 		enum intel_dpll_id pll_id;
5710 		bool pll_active;
5711 
5712 		pipe_config->has_pch_encoder = true;
5713 
5714 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5715 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5716 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
5717 
5718 		ilk_get_fdi_m_n_config(crtc, pipe_config);
5719 
5720 		if (HAS_PCH_IBX(dev_priv)) {
5721 			/*
5722 			 * The pipe->pch transcoder and pch transcoder->pll
5723 			 * mapping is fixed.
5724 			 */
5725 			pll_id = (enum intel_dpll_id) crtc->pipe;
5726 		} else {
5727 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5728 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5729 				pll_id = DPLL_ID_PCH_PLL_B;
5730 			else
5731 				pll_id= DPLL_ID_PCH_PLL_A;
5732 		}
5733 
5734 		pipe_config->shared_dpll =
5735 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
5736 		pll = pipe_config->shared_dpll;
5737 
5738 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5739 						     &pipe_config->dpll_hw_state);
5740 		drm_WARN_ON(dev, !pll_active);
5741 
5742 		tmp = pipe_config->dpll_hw_state.dpll;
5743 		pipe_config->pixel_multiplier =
5744 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5745 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5746 
5747 		ilk_pch_clock_get(crtc, pipe_config);
5748 	} else {
5749 		pipe_config->pixel_multiplier = 1;
5750 	}
5751 
5752 	intel_get_transcoder_timings(crtc, pipe_config);
5753 	intel_get_pipe_src_size(crtc, pipe_config);
5754 
5755 	ilk_get_pfit_config(pipe_config);
5756 
5757 	ret = true;
5758 
5759 out:
5760 	intel_display_power_put(dev_priv, power_domain, wakeref);
5761 
5762 	return ret;
5763 }
5764 
/*
 * Determine which CPU transcoder is driving @crtc on HSW+ and whether
 * it is enabled. The default pipe transcoder may be overridden by the
 * eDP transcoder (HSW-GLK) or a DSI transcoder (ICL+) if one of those
 * is routed to this pipe. On success the transcoder's power domain is
 * added to @power_domain_set (released by the caller).
 *
 * Returns true if the selected transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (DISPLAY_VER(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* decode which pipe the panel transcoder is attached to */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			/* "on/off" mode requires force_thru on pipe A */
			force_thru = true;
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
5852 
/*
 * Check whether a BXT/GLK DSI transcoder is driving @crtc. Walks the
 * two possible DSI ports (A and C), and if one is enabled and routed to
 * this pipe, records the corresponding DSI transcoder in @pipe_config.
 * Transcoder power domains that were acquired are tracked in
 * @power_domain_set for the caller to release.
 *
 * Returns true if a DSI transcoder was found for this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
5898 
/*
 * Figure out which DDI port the transcoder is driving and, on Haswell,
 * whether the PCH encoder (FDI via DDI E) is involved; fills in the
 * has_pch_encoder / fdi_lanes / fdi_m_n state accordingly.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders have a fixed port mapping */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			return;
		if (DISPLAY_VER(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), and it is connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (DISPLAY_VER(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
5937 
/*
 * Read out the full hw state of a HSW+ pipe into @pipe_config.
 *
 * Acquires (and releases before returning) the power domains needed to
 * safely read the registers. Returns true if the pipe is active.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* on BXT/GLK a DSI transcoder may drive the pipe instead */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);

	if (!active) {
		/* bigjoiner slave doesn't enable transcoder */
		if (!pipe_config->bigjoiner_slave)
			goto out;

		active = true;
		pipe_config->pixel_multiplier = 1;

		/* we cannot read out most state, so don't bother.. */
		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_transcoder_timings(crtc, pipe_config);
	}

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* output colorspace: PIPECONF on HSW, PIPEMISC on BDW+ */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* panel fitter state needs its own power domain */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->bigjoiner_slave) {
		/* Cannot be read out as a slave, set to 0. */
		pipe_config->pixel_multiplier = 0;
	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
6061 
/*
 * Read out the hw state for the crtc via the platform-specific
 * get_pipe_config() hook, then derive the remaining software state.
 * Returns false if the pipe is not active.
 */
static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display.get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}
6076 
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6082 
6083 struct drm_framebuffer *
6084 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6085 			 struct drm_mode_fb_cmd2 *mode_cmd)
6086 {
6087 	struct intel_framebuffer *intel_fb;
6088 	int ret;
6089 
6090 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6091 	if (!intel_fb)
6092 		return ERR_PTR(-ENOMEM);
6093 
6094 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6095 	if (ret)
6096 		goto err;
6097 
6098 	return &intel_fb->base;
6099 
6100 err:
6101 	kfree(intel_fb);
6102 	return ERR_PTR(ret);
6103 }
6104 
/*
 * Add all planes on @crtc to @state and set them up to be disabled
 * (no crtc, no fb). Returns 0 on success or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
6129 
/*
 * Set up a pipe with a fixed VESA mode so @connector can be probed for
 * load detection. The pre-existing display state is duplicated into
 * old->restore_state so intel_release_load_detect_pipe() can restore it.
 *
 * NOTE(review): despite the int return type this returns true/false,
 * except that -EDEADLK is passed through for modeset-lock backoff —
 * callers must handle all three cases.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* one state to commit, one to remember for later restore */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* planes stay off during load detection */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* snapshot the current connector/crtc/plane state for restore */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK means the caller must back off and retry */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
6288 
/*
 * Undo a successful intel_get_load_detect_pipe() by committing the
 * saved restore_state, then dropping our reference to it.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	/* nothing was saved, so nothing to restore */
	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		drm_dbg_kms(&i915->drm,
			    "Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
6313 
6314 static int i9xx_pll_refclk(struct drm_device *dev,
6315 			   const struct intel_crtc_state *pipe_config)
6316 {
6317 	struct drm_i915_private *dev_priv = to_i915(dev);
6318 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6319 
6320 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6321 		return dev_priv->vbt.lvds_ssc_freq;
6322 	else if (HAS_PCH_SPLIT(dev_priv))
6323 		return 120000;
6324 	else if (!IS_DISPLAY_VER(dev_priv, 2))
6325 		return 96000;
6326 	else
6327 		return 48000;
6328 }
6329 
6330 /* Returns the clock of the currently programmed mode of the given pipe. */
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* pick the FP register the DPLL is actually using */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* decode the M/N dividers; PNV uses a different field layout */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_DISPLAY_VER(dev_priv, 2)) {
		/* gen3+: P1 is a bitmask, P2 depends on the DPLL mode */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: divider encoding differs, and LVDS is special */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
6421 
6422 int intel_dotclock_calculate(int link_freq,
6423 			     const struct intel_link_m_n *m_n)
6424 {
6425 	/*
6426 	 * The calculation for the data clock is:
6427 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6428 	 * But we want to avoid losing precison if possible, so:
6429 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6430 	 *
6431 	 * and the link clock is simpler:
6432 	 * link_clock = (m * link_clock) / n
6433 	 */
6434 
6435 	if (!m_n->link_n)
6436 		return 0;
6437 
6438 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6439 }
6440 
/*
 * Read out the port clock from the DPLL and derive a dotclock for a
 * PCH-driven pipe from the FDI link configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
6458 
6459 /* Returns the currently programmed mode of the given encoder. */
6460 struct drm_display_mode *
6461 intel_encoder_current_mode(struct intel_encoder *encoder)
6462 {
6463 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6464 	struct intel_crtc_state *crtc_state;
6465 	struct drm_display_mode *mode;
6466 	struct intel_crtc *crtc;
6467 	enum pipe pipe;
6468 
6469 	if (!encoder->get_hw_state(encoder, &pipe))
6470 		return NULL;
6471 
6472 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6473 
6474 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6475 	if (!mode)
6476 		return NULL;
6477 
6478 	crtc_state = intel_crtc_state_alloc(crtc);
6479 	if (!crtc_state) {
6480 		kfree(mode);
6481 		return NULL;
6482 	}
6483 
6484 	if (!intel_crtc_get_pipe_config(crtc_state)) {
6485 		kfree(crtc_state);
6486 		kfree(mode);
6487 		return NULL;
6488 	}
6489 
6490 	intel_encoder_get_config(encoder, crtc_state);
6491 
6492 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6493 
6494 	kfree(crtc_state);
6495 
6496 	return mode;
6497 }
6498 
6499 /**
6500  * intel_wm_need_update - Check whether watermarks need updating
6501  * @cur: current plane state
6502  * @new: new plane state
6503  *
6504  * Check current plane state versus the new one to determine whether
6505  * watermarks need to be recalculated.
6506  *
6507  * Returns true or false.
6508  */
6509 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6510 				 struct intel_plane_state *new)
6511 {
6512 	/* Update watermarks on tiling or size changes. */
6513 	if (new->uapi.visible != cur->uapi.visible)
6514 		return true;
6515 
6516 	if (!cur->hw.fb || !new->hw.fb)
6517 		return false;
6518 
6519 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6520 	    cur->hw.rotation != new->hw.rotation ||
6521 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6522 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6523 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6524 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6525 		return true;
6526 
6527 	return false;
6528 }
6529 
6530 static bool needs_scaling(const struct intel_plane_state *state)
6531 {
6532 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
6533 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
6534 	int dst_w = drm_rect_width(&state->uapi.dst);
6535 	int dst_h = drm_rect_height(&state->uapi.dst);
6536 
6537 	return (src_w != dst_w || src_h != dst_h);
6538 }
6539 
/*
 * Compute how a plane update affects the crtc: records whether watermarks
 * must be updated before/after the commit (update_wm_pre/update_wm_post),
 * whether CxSR and LP watermarks must be temporarily disabled
 * (disable_cxsr/disable_lp_wm), and which frontbuffer bits are affected
 * (fb_bits). Also sets up the plane scaler on SKL+.
 *
 * Returns 0 on success, or a negative error code if scaler setup fails.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* SKL+ non-cursor planes may need a pipe scaler assigned/released. */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on a crtc that was off. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing to do for this plane. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		/* Pre-gen5 (except g4x) must bump watermarks before enabling. */
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		/* ...and may lower watermarks only after disabling. */
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
6661 
6662 static bool encoders_cloneable(const struct intel_encoder *a,
6663 			       const struct intel_encoder *b)
6664 {
6665 	/* masks could be asymmetric, so check both ways */
6666 	return a == b || (a->cloneable & (1 << b->type) &&
6667 			  b->cloneable & (1 << a->type));
6668 }
6669 
6670 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6671 					 struct intel_crtc *crtc,
6672 					 struct intel_encoder *encoder)
6673 {
6674 	struct intel_encoder *source_encoder;
6675 	struct drm_connector *connector;
6676 	struct drm_connector_state *connector_state;
6677 	int i;
6678 
6679 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6680 		if (connector_state->crtc != &crtc->base)
6681 			continue;
6682 
6683 		source_encoder =
6684 			to_intel_encoder(connector_state->best_encoder);
6685 		if (!encoders_cloneable(encoder, source_encoder))
6686 			return false;
6687 	}
6688 
6689 	return true;
6690 }
6691 
/*
 * Pull the linked Y (luma) plane of every planar YUV plane pair into the
 * atomic state, so both halves of the pair get committed together.
 *
 * Returns 0 on success, or a negative error code if the linked plane's
 * state cannot be acquired.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* The link must be mutual, with exactly one side the slave. */
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
6716 
/*
 * On ICL+ a planar (NV12 etc.) framebuffer needs a second, normally unused
 * plane to scan out the Y (luma) surface. Tear down any stale plane links
 * on this crtc, then pair each plane in crtc_state->nv12_planes with a free
 * Y-capable plane and copy the relevant parameters to the slave.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or a
 * negative error code if a plane state cannot be acquired.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on display version 11+. */
	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* Nothing planar on this crtc: done after the teardown above. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc to act as slave. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes must tell the hw which plane acts as their CUS. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
6814 
6815 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
6816 {
6817 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6818 	struct intel_atomic_state *state =
6819 		to_intel_atomic_state(new_crtc_state->uapi.state);
6820 	const struct intel_crtc_state *old_crtc_state =
6821 		intel_atomic_get_old_crtc_state(state, crtc);
6822 
6823 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
6824 }
6825 
6826 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
6827 {
6828 	const struct drm_display_mode *pipe_mode =
6829 		&crtc_state->hw.pipe_mode;
6830 	int linetime_wm;
6831 
6832 	if (!crtc_state->hw.enable)
6833 		return 0;
6834 
6835 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6836 					pipe_mode->crtc_clock);
6837 
6838 	return min(linetime_wm, 0x1ff);
6839 }
6840 
6841 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
6842 			       const struct intel_cdclk_state *cdclk_state)
6843 {
6844 	const struct drm_display_mode *pipe_mode =
6845 		&crtc_state->hw.pipe_mode;
6846 	int linetime_wm;
6847 
6848 	if (!crtc_state->hw.enable)
6849 		return 0;
6850 
6851 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6852 					cdclk_state->logical.cdclk);
6853 
6854 	return min(linetime_wm, 0x1ff);
6855 }
6856 
6857 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
6858 {
6859 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6860 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6861 	const struct drm_display_mode *pipe_mode =
6862 		&crtc_state->hw.pipe_mode;
6863 	int linetime_wm;
6864 
6865 	if (!crtc_state->hw.enable)
6866 		return 0;
6867 
6868 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
6869 				   crtc_state->pixel_rate);
6870 
6871 	/* Display WA #1135: BXT:ALL GLK:ALL */
6872 	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
6873 		linetime_wm /= 2;
6874 
6875 	return min(linetime_wm, 0x1ff);
6876 }
6877 
6878 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
6879 				   struct intel_crtc *crtc)
6880 {
6881 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6882 	struct intel_crtc_state *crtc_state =
6883 		intel_atomic_get_new_crtc_state(state, crtc);
6884 	const struct intel_cdclk_state *cdclk_state;
6885 
6886 	if (DISPLAY_VER(dev_priv) >= 9)
6887 		crtc_state->linetime = skl_linetime_wm(crtc_state);
6888 	else
6889 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
6890 
6891 	if (!hsw_crtc_supports_ips(crtc))
6892 		return 0;
6893 
6894 	cdclk_state = intel_atomic_get_cdclk_state(state);
6895 	if (IS_ERR(cdclk_state))
6896 		return PTR_ERR(cdclk_state);
6897 
6898 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
6899 						       cdclk_state);
6900 
6901 	return 0;
6902 }
6903 
6904 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
6905 				   struct intel_crtc *crtc)
6906 {
6907 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6908 	struct intel_crtc_state *crtc_state =
6909 		intel_atomic_get_new_crtc_state(state, crtc);
6910 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6911 	int ret;
6912 
6913 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
6914 	    mode_changed && !crtc_state->hw.active)
6915 		crtc_state->update_wm_post = true;
6916 
6917 	if (mode_changed && crtc_state->hw.enable &&
6918 	    dev_priv->display.crtc_compute_clock &&
6919 	    !crtc_state->bigjoiner_slave &&
6920 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
6921 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
6922 		if (ret)
6923 			return ret;
6924 	}
6925 
6926 	/*
6927 	 * May need to update pipe gamma enable bits
6928 	 * when C8 planes are getting enabled/disabled.
6929 	 */
6930 	if (c8_planes_changed(crtc_state))
6931 		crtc_state->uapi.color_mgmt_changed = true;
6932 
6933 	if (mode_changed || crtc_state->update_pipe ||
6934 	    crtc_state->uapi.color_mgmt_changed) {
6935 		ret = intel_color_check(crtc_state);
6936 		if (ret)
6937 			return ret;
6938 	}
6939 
6940 	if (dev_priv->display.compute_pipe_wm) {
6941 		ret = dev_priv->display.compute_pipe_wm(crtc_state);
6942 		if (ret) {
6943 			drm_dbg_kms(&dev_priv->drm,
6944 				    "Target pipe watermarks are invalid\n");
6945 			return ret;
6946 		}
6947 	}
6948 
6949 	if (dev_priv->display.compute_intermediate_wm) {
6950 		if (drm_WARN_ON(&dev_priv->drm,
6951 				!dev_priv->display.compute_pipe_wm))
6952 			return 0;
6953 
6954 		/*
6955 		 * Calculate 'intermediate' watermarks that satisfy both the
6956 		 * old state and the new state.  We can program these
6957 		 * immediately.
6958 		 */
6959 		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
6960 		if (ret) {
6961 			drm_dbg_kms(&dev_priv->drm,
6962 				    "No valid intermediate pipe watermarks are possible\n");
6963 			return ret;
6964 		}
6965 	}
6966 
6967 	if (DISPLAY_VER(dev_priv) >= 9) {
6968 		if (mode_changed || crtc_state->update_pipe) {
6969 			ret = skl_update_scaler_crtc(crtc_state);
6970 			if (ret)
6971 				return ret;
6972 		}
6973 
6974 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
6975 		if (ret)
6976 			return ret;
6977 	}
6978 
6979 	if (HAS_IPS(dev_priv)) {
6980 		ret = hsw_compute_ips_config(crtc_state);
6981 		if (ret)
6982 			return ret;
6983 	}
6984 
6985 	if (DISPLAY_VER(dev_priv) >= 9 ||
6986 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
6987 		ret = hsw_compute_linetime_wm(state, crtc);
6988 		if (ret)
6989 			return ret;
6990 
6991 	}
6992 
6993 	if (!mode_changed) {
6994 		ret = intel_psr2_sel_fetch_update(state, crtc);
6995 		if (ret)
6996 			return ret;
6997 	}
6998 
6999 	return 0;
7000 }
7001 
/*
 * Synchronize every connector's atomic state (best_encoder, crtc, max_bpc
 * and the connector reference held for a crtc binding) with the current
 * encoder/crtc routing, e.g. after hardware state readout.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/* Drop the reference held for the stale crtc binding. */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

		if (encoder) {
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* Derive max_bpc from pipe bpp (3 components), 8 bpc default. */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			/* Take a reference for the new crtc binding. */
			drm_connector_get(&connector->base);
		} else {
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
7034 
7035 static int
7036 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7037 		      struct intel_crtc_state *pipe_config)
7038 {
7039 	struct drm_connector *connector = conn_state->connector;
7040 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7041 	const struct drm_display_info *info = &connector->display_info;
7042 	int bpp;
7043 
7044 	switch (conn_state->max_bpc) {
7045 	case 6 ... 7:
7046 		bpp = 6 * 3;
7047 		break;
7048 	case 8 ... 9:
7049 		bpp = 8 * 3;
7050 		break;
7051 	case 10 ... 11:
7052 		bpp = 10 * 3;
7053 		break;
7054 	case 12 ... 16:
7055 		bpp = 12 * 3;
7056 		break;
7057 	default:
7058 		MISSING_CASE(conn_state->max_bpc);
7059 		return -EINVAL;
7060 	}
7061 
7062 	if (bpp < pipe_config->pipe_bpp) {
7063 		drm_dbg_kms(&i915->drm,
7064 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7065 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7066 			    connector->base.id, connector->name,
7067 			    bpp, 3 * info->bpc,
7068 			    3 * conn_state->max_requested_bpc,
7069 			    pipe_config->pipe_bpp);
7070 
7071 		pipe_config->pipe_bpp = bpp;
7072 	}
7073 
7074 	return 0;
7075 }
7076 
7077 static int
7078 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7079 			  struct intel_crtc_state *pipe_config)
7080 {
7081 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7082 	struct drm_atomic_state *state = pipe_config->uapi.state;
7083 	struct drm_connector *connector;
7084 	struct drm_connector_state *connector_state;
7085 	int bpp, i;
7086 
7087 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7088 	    IS_CHERRYVIEW(dev_priv)))
7089 		bpp = 10*3;
7090 	else if (DISPLAY_VER(dev_priv) >= 5)
7091 		bpp = 12*3;
7092 	else
7093 		bpp = 8*3;
7094 
7095 	pipe_config->pipe_bpp = bpp;
7096 
7097 	/* Clamp display bpp to connector max bpp */
7098 	for_each_new_connector_in_state(state, connector, connector_state, i) {
7099 		int ret;
7100 
7101 		if (connector_state->crtc != &crtc->base)
7102 			continue;
7103 
7104 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7105 		if (ret)
7106 			return ret;
7107 	}
7108 
7109 	return 0;
7110 }
7111 
7112 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7113 				    const struct drm_display_mode *mode)
7114 {
7115 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7116 		    "type: 0x%x flags: 0x%x\n",
7117 		    mode->crtc_clock,
7118 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
7119 		    mode->crtc_hsync_end, mode->crtc_htotal,
7120 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
7121 		    mode->crtc_vsync_end, mode->crtc_vtotal,
7122 		    mode->type, mode->flags);
7123 }
7124 
7125 static void
7126 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7127 		      const char *id, unsigned int lane_count,
7128 		      const struct intel_link_m_n *m_n)
7129 {
7130 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7131 
7132 	drm_dbg_kms(&i915->drm,
7133 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7134 		    id, lane_count,
7135 		    m_n->gmch_m, m_n->gmch_n,
7136 		    m_n->link_m, m_n->link_n, m_n->tu);
7137 }
7138 
7139 static void
7140 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7141 		     const union hdmi_infoframe *frame)
7142 {
7143 	if (!drm_debug_enabled(DRM_UT_KMS))
7144 		return;
7145 
7146 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7147 }
7148 
7149 static void
7150 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7151 		      const struct drm_dp_vsc_sdp *vsc)
7152 {
7153 	if (!drm_debug_enabled(DRM_UT_KMS))
7154 		return;
7155 
7156 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7157 }
7158 
/* Expands INTEL_OUTPUT_FOO into a [value] = "FOO" string-table entry. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for the INTEL_OUTPUT_* enum, indexed by value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
7177 
7178 static void snprintf_output_types(char *buf, size_t len,
7179 				  unsigned int output_types)
7180 {
7181 	char *str = buf;
7182 	int i;
7183 
7184 	str[0] = '\0';
7185 
7186 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7187 		int r;
7188 
7189 		if ((output_types & BIT(i)) == 0)
7190 			continue;
7191 
7192 		r = snprintf(str, len, "%s%s",
7193 			     str != buf ? "," : "", output_type_str[i]);
7194 		if (r >= len)
7195 			break;
7196 		str += r;
7197 		len -= r;
7198 
7199 		output_types &= ~BIT(i);
7200 	}
7201 
7202 	WARN_ON_ONCE(output_types != 0);
7203 }
7204 
/* Human-readable names for enum intel_output_format, indexed by value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
7210 
7211 static const char *output_formats(enum intel_output_format format)
7212 {
7213 	if (format >= ARRAY_SIZE(output_format_str))
7214 		return "invalid";
7215 	return output_format_str[format];
7216 }
7217 
/* Dump one plane's state (fb, rotation, scaler, src/dst) to the debug log. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/* Without an fb there is nothing beyond visibility to report. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
7245 
/*
 * Dump the full contents of a crtc state — and, when @state is provided,
 * the states of all planes on the crtc — to the KMS debug log. @context
 * is a free-form label identifying the caller's situation.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled pipe has no meaningful config; just dump the planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
		    enableddisabled(pipe_config->splitter.enable),
		    pipe_config->splitter.link_count,
		    pipe_config->splitter.pixel_overlap);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		/* NOTE(review): the gamut metadata packet appears to reuse
		 * the stored DRM infoframe (infoframes.drm again, not a
		 * separate field) — confirm against the infoframe readout
		 * code. */
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (DISPLAY_VER(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
7410 
/*
 * Verify that no digital port is claimed by more than one connector, and
 * that MST and SST/HDMI are not mixed on the same port. Returns true when
 * the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Use the new state if present, else the current state. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			fallthrough;
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			/* MST may legitimately use the same port repeatedly. */
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
7479 
7480 static void
7481 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7482 					   struct intel_crtc_state *crtc_state)
7483 {
7484 	const struct intel_crtc_state *from_crtc_state = crtc_state;
7485 
7486 	if (crtc_state->bigjoiner_slave) {
7487 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
7488 								  crtc_state->bigjoiner_linked_crtc);
7489 
7490 		/* No need to copy state if the master state is unchanged */
7491 		if (!from_crtc_state)
7492 			return;
7493 	}
7494 
7495 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7496 }
7497 
/*
 * Seed the hw state from the uapi state at the start of a modeset:
 * the enable/active flags, both modes and the scaling filter are copied
 * verbatim, then the color blobs are brought over via the nomodeset
 * helper (which also handles the bigjoiner slave case).
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;

	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
}
7510 
/*
 * Mirror the hw state back into the uapi state so userspace-visible
 * state matches what the hardware is actually doing. Bigjoiner slaves
 * are skipped: their uapi state is not exposed this way.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/*
	 * Note: the mode-set call inside the WARN condition is always
	 * evaluated; the WARN only fires if it fails.
	 */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
7532 
/*
 * Turn @crtc_state into a bigjoiner slave state by cloning the master's
 * state (@from_crtc_state) wholesale, while preserving this CRTC's own
 * per-pipe bits (uapi, scalers, DPLL, CRC) and then fixing up the fields
 * that must differ between master and slave.
 *
 * Returns 0 on success, -ENOMEM if the temporary copy can't be allocated.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* keep this pipe's own identity on top of the master's copy */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	/* (sizeof is compile-time only, so using saved_state here is fine) */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	/* plane bookkeeping restarts from scratch on the slave */
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
	crtc_state->has_audio = false;

	return 0;
}
7573 
/*
 * Reset @crtc_state to a freshly-allocated (mostly zeroed) state before
 * recomputing it, while carrying over the fields that must survive:
 * uapi state, scalers, DPLL selection/state, CRC enablement and (on
 * pre-ilk style watermark platforms) the watermark state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* these platforms track watermarks in the crtc state */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
7612 
/*
 * Compute the full pipe configuration for a modeset: sanitize sync flags,
 * pick the baseline bpp, derive the pipe source size, collect output types,
 * then let each encoder and finally the CRTC adjust the config. The whole
 * encoder/CRTC step may be retried once if the CRTC asks for it (e.g.
 * bandwidth constraints reduced the bpp).
 *
 * Returns 0 on success, -EDEADLK for lock retries, other negative errno
 * on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* remember the starting bpp; encoders/CRTC may lower pipe_bpp below */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is a normal lock retry, don't log it */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* a positive RETRY return re-runs the encoder loop exactly once */
	if (ret == I915_DISPLAY_CONFIG_RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
7751 
7752 static int
7753 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7754 {
7755 	struct intel_atomic_state *state =
7756 		to_intel_atomic_state(crtc_state->uapi.state);
7757 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7758 	struct drm_connector_state *conn_state;
7759 	struct drm_connector *connector;
7760 	int i;
7761 
7762 	for_each_new_connector_in_state(&state->base, connector,
7763 					conn_state, i) {
7764 		struct intel_encoder *encoder =
7765 			to_intel_encoder(conn_state->best_encoder);
7766 		int ret;
7767 
7768 		if (conn_state->crtc != &crtc->base ||
7769 		    !encoder->compute_config_late)
7770 			continue;
7771 
7772 		ret = encoder->compute_config_late(encoder, crtc_state,
7773 						   conn_state);
7774 		if (ret)
7775 			return ret;
7776 	}
7777 
7778 	return 0;
7779 }
7780 
/*
 * Compare two clock values with a small tolerance.
 *
 * Returns true when the clocks are identical, or when their difference
 * stays below roughly 5% of their sum. A zero clock only matches another
 * zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/* accept if (delta + sum) scaled to percent is under 105 */
	return (delta + sum) * 100 / sum < 105;
}
7798 
7799 static bool
7800 intel_compare_m_n(unsigned int m, unsigned int n,
7801 		  unsigned int m2, unsigned int n2,
7802 		  bool exact)
7803 {
7804 	if (m == m2 && n == n2)
7805 		return true;
7806 
7807 	if (exact || !m || !n || !m2 || !n2)
7808 		return false;
7809 
7810 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
7811 
7812 	if (n > n2) {
7813 		while (n > n2) {
7814 			m2 <<= 1;
7815 			n2 <<= 1;
7816 		}
7817 	} else if (n < n2) {
7818 		while (n < n2) {
7819 			m <<= 1;
7820 			n <<= 1;
7821 		}
7822 	}
7823 
7824 	if (n != n2)
7825 		return false;
7826 
7827 	return intel_fuzzy_clock_check(m, m2);
7828 }
7829 
7830 static bool
7831 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
7832 		       const struct intel_link_m_n *m2_n2,
7833 		       bool exact)
7834 {
7835 	return m_n->tu == m2_n2->tu &&
7836 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
7837 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
7838 		intel_compare_m_n(m_n->link_m, m_n->link_n,
7839 				  m2_n2->link_m, m2_n2->link_n, exact);
7840 }
7841 
7842 static bool
7843 intel_compare_infoframe(const union hdmi_infoframe *a,
7844 			const union hdmi_infoframe *b)
7845 {
7846 	return memcmp(a, b, sizeof(*a)) == 0;
7847 }
7848 
7849 static bool
7850 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
7851 			 const struct drm_dp_vsc_sdp *b)
7852 {
7853 	return memcmp(a, b, sizeof(*a)) == 0;
7854 }
7855 
7856 static void
7857 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
7858 			       bool fastset, const char *name,
7859 			       const union hdmi_infoframe *a,
7860 			       const union hdmi_infoframe *b)
7861 {
7862 	if (fastset) {
7863 		if (!drm_debug_enabled(DRM_UT_KMS))
7864 			return;
7865 
7866 		drm_dbg_kms(&dev_priv->drm,
7867 			    "fastset mismatch in %s infoframe\n", name);
7868 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
7869 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
7870 		drm_dbg_kms(&dev_priv->drm, "found:\n");
7871 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
7872 	} else {
7873 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
7874 		drm_err(&dev_priv->drm, "expected:\n");
7875 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
7876 		drm_err(&dev_priv->drm, "found:\n");
7877 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
7878 	}
7879 }
7880 
7881 static void
7882 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
7883 				bool fastset, const char *name,
7884 				const struct drm_dp_vsc_sdp *a,
7885 				const struct drm_dp_vsc_sdp *b)
7886 {
7887 	if (fastset) {
7888 		if (!drm_debug_enabled(DRM_UT_KMS))
7889 			return;
7890 
7891 		drm_dbg_kms(&dev_priv->drm,
7892 			    "fastset mismatch in %s dp sdp\n", name);
7893 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
7894 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
7895 		drm_dbg_kms(&dev_priv->drm, "found:\n");
7896 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
7897 	} else {
7898 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
7899 		drm_err(&dev_priv->drm, "expected:\n");
7900 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
7901 		drm_err(&dev_priv->drm, "found:\n");
7902 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
7903 	}
7904 }
7905 
/*
 * Printf-style mismatch reporter for pipe config checks: logs at KMS
 * debug level for fastset checks, error level otherwise. The varargs
 * are forwarded via %pV.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
7928 
7929 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
7930 {
7931 	if (dev_priv->params.fastboot != -1)
7932 		return dev_priv->params.fastboot;
7933 
7934 	/* Enable fastboot by default on Skylake and newer */
7935 	if (DISPLAY_VER(dev_priv) >= 9)
7936 		return true;
7937 
7938 	/* Enable fastboot by default on VLV and CHV */
7939 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7940 		return true;
7941 
7942 	/* Disabled by default on all others */
7943 	return false;
7944 }
7945 
7946 static bool
7947 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
7948 			  const struct intel_crtc_state *pipe_config,
7949 			  bool fastset)
7950 {
7951 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
7952 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7953 	bool ret = true;
7954 	u32 bp_gamma = 0;
7955 	bool fixup_inherited = fastset &&
7956 		current_config->inherited && !pipe_config->inherited;
7957 
7958 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
7959 		drm_dbg_kms(&dev_priv->drm,
7960 			    "initial modeset and fastboot not set\n");
7961 		ret = false;
7962 	}
7963 
7964 #define PIPE_CONF_CHECK_X(name) do { \
7965 	if (current_config->name != pipe_config->name) { \
7966 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
7967 				     "(expected 0x%08x, found 0x%08x)", \
7968 				     current_config->name, \
7969 				     pipe_config->name); \
7970 		ret = false; \
7971 	} \
7972 } while (0)
7973 
7974 #define PIPE_CONF_CHECK_I(name) do { \
7975 	if (current_config->name != pipe_config->name) { \
7976 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
7977 				     "(expected %i, found %i)", \
7978 				     current_config->name, \
7979 				     pipe_config->name); \
7980 		ret = false; \
7981 	} \
7982 } while (0)
7983 
7984 #define PIPE_CONF_CHECK_BOOL(name) do { \
7985 	if (current_config->name != pipe_config->name) { \
7986 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
7987 				     "(expected %s, found %s)", \
7988 				     yesno(current_config->name), \
7989 				     yesno(pipe_config->name)); \
7990 		ret = false; \
7991 	} \
7992 } while (0)
7993 
7994 /*
7995  * Checks state where we only read out the enabling, but not the entire
7996  * state itself (like full infoframes or ELD for audio). These states
7997  * require a full modeset on bootup to fix up.
7998  */
7999 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8000 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8001 		PIPE_CONF_CHECK_BOOL(name); \
8002 	} else { \
8003 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8004 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8005 				     yesno(current_config->name), \
8006 				     yesno(pipe_config->name)); \
8007 		ret = false; \
8008 	} \
8009 } while (0)
8010 
8011 #define PIPE_CONF_CHECK_P(name) do { \
8012 	if (current_config->name != pipe_config->name) { \
8013 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8014 				     "(expected %p, found %p)", \
8015 				     current_config->name, \
8016 				     pipe_config->name); \
8017 		ret = false; \
8018 	} \
8019 } while (0)
8020 
8021 #define PIPE_CONF_CHECK_M_N(name) do { \
8022 	if (!intel_compare_link_m_n(&current_config->name, \
8023 				    &pipe_config->name,\
8024 				    !fastset)) { \
8025 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8026 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8027 				     "found tu %i, gmch %i/%i link %i/%i)", \
8028 				     current_config->name.tu, \
8029 				     current_config->name.gmch_m, \
8030 				     current_config->name.gmch_n, \
8031 				     current_config->name.link_m, \
8032 				     current_config->name.link_n, \
8033 				     pipe_config->name.tu, \
8034 				     pipe_config->name.gmch_m, \
8035 				     pipe_config->name.gmch_n, \
8036 				     pipe_config->name.link_m, \
8037 				     pipe_config->name.link_n); \
8038 		ret = false; \
8039 	} \
8040 } while (0)
8041 
8042 /* This is required for BDW+ where there is only one set of registers for
8043  * switching between high and low RR.
8044  * This macro can be used whenever a comparison has to be made between one
8045  * hw state and multiple sw state variables.
8046  */
8047 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8048 	if (!intel_compare_link_m_n(&current_config->name, \
8049 				    &pipe_config->name, !fastset) && \
8050 	    !intel_compare_link_m_n(&current_config->alt_name, \
8051 				    &pipe_config->name, !fastset)) { \
8052 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8053 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8054 				     "or tu %i gmch %i/%i link %i/%i, " \
8055 				     "found tu %i, gmch %i/%i link %i/%i)", \
8056 				     current_config->name.tu, \
8057 				     current_config->name.gmch_m, \
8058 				     current_config->name.gmch_n, \
8059 				     current_config->name.link_m, \
8060 				     current_config->name.link_n, \
8061 				     current_config->alt_name.tu, \
8062 				     current_config->alt_name.gmch_m, \
8063 				     current_config->alt_name.gmch_n, \
8064 				     current_config->alt_name.link_m, \
8065 				     current_config->alt_name.link_n, \
8066 				     pipe_config->name.tu, \
8067 				     pipe_config->name.gmch_m, \
8068 				     pipe_config->name.gmch_n, \
8069 				     pipe_config->name.link_m, \
8070 				     pipe_config->name.link_n); \
8071 		ret = false; \
8072 	} \
8073 } while (0)
8074 
8075 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8076 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8077 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8078 				     "(%x) (expected %i, found %i)", \
8079 				     (mask), \
8080 				     current_config->name & (mask), \
8081 				     pipe_config->name & (mask)); \
8082 		ret = false; \
8083 	} \
8084 } while (0)
8085 
8086 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8087 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8088 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8089 				     "(expected %i, found %i)", \
8090 				     current_config->name, \
8091 				     pipe_config->name); \
8092 		ret = false; \
8093 	} \
8094 } while (0)
8095 
8096 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8097 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
8098 				     &pipe_config->infoframes.name)) { \
8099 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8100 					       &current_config->infoframes.name, \
8101 					       &pipe_config->infoframes.name); \
8102 		ret = false; \
8103 	} \
8104 } while (0)
8105 
8106 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8107 	if (!current_config->has_psr && !pipe_config->has_psr && \
8108 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8109 				      &pipe_config->infoframes.name)) { \
8110 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8111 						&current_config->infoframes.name, \
8112 						&pipe_config->infoframes.name); \
8113 		ret = false; \
8114 	} \
8115 } while (0)
8116 
8117 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8118 	if (current_config->name1 != pipe_config->name1) { \
8119 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8120 				"(expected %i, found %i, won't compare lut values)", \
8121 				current_config->name1, \
8122 				pipe_config->name1); \
8123 		ret = false;\
8124 	} else { \
8125 		if (!intel_color_lut_equal(current_config->name2, \
8126 					pipe_config->name2, pipe_config->name1, \
8127 					bit_precision)) { \
8128 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8129 					"hw_state doesn't match sw_state"); \
8130 			ret = false; \
8131 		} \
8132 	} \
8133 } while (0)
8134 
8135 #define PIPE_CONF_QUIRK(quirk) \
8136 	((current_config->quirks | pipe_config->quirks) & (quirk))
8137 
8138 	PIPE_CONF_CHECK_I(cpu_transcoder);
8139 
8140 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8141 	PIPE_CONF_CHECK_I(fdi_lanes);
8142 	PIPE_CONF_CHECK_M_N(fdi_m_n);
8143 
8144 	PIPE_CONF_CHECK_I(lane_count);
8145 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8146 
8147 	if (DISPLAY_VER(dev_priv) < 8) {
8148 		PIPE_CONF_CHECK_M_N(dp_m_n);
8149 
8150 		if (current_config->has_drrs)
8151 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
8152 	} else
8153 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8154 
8155 	PIPE_CONF_CHECK_X(output_types);
8156 
8157 	/* FIXME do the readout properly and get rid of this quirk */
8158 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8159 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8160 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8161 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8162 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8163 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8164 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8165 
8166 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8167 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8168 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8169 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8170 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8171 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8172 
8173 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8174 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8175 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8176 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8177 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8178 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8179 
8180 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8181 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8182 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8183 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8184 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8185 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8186 
8187 		PIPE_CONF_CHECK_I(pixel_multiplier);
8188 
8189 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8190 				      DRM_MODE_FLAG_INTERLACE);
8191 
8192 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8193 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8194 					      DRM_MODE_FLAG_PHSYNC);
8195 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8196 					      DRM_MODE_FLAG_NHSYNC);
8197 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8198 					      DRM_MODE_FLAG_PVSYNC);
8199 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8200 					      DRM_MODE_FLAG_NVSYNC);
8201 		}
8202 	}
8203 
8204 	PIPE_CONF_CHECK_I(output_format);
8205 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8206 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8207 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8208 		PIPE_CONF_CHECK_BOOL(limited_color_range);
8209 
8210 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8211 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8212 	PIPE_CONF_CHECK_BOOL(has_infoframe);
8213 	/* FIXME do the readout properly and get rid of this quirk */
8214 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8215 		PIPE_CONF_CHECK_BOOL(fec_enable);
8216 
8217 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8218 
8219 	PIPE_CONF_CHECK_X(gmch_pfit.control);
8220 	/* pfit ratios are autocomputed by the hw on gen4+ */
8221 	if (DISPLAY_VER(dev_priv) < 4)
8222 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8223 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8224 
8225 	/*
8226 	 * Changing the EDP transcoder input mux
8227 	 * (A_ONOFF vs. A_ON) requires a full modeset.
8228 	 */
8229 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8230 
8231 	if (!fastset) {
8232 		PIPE_CONF_CHECK_I(pipe_src_w);
8233 		PIPE_CONF_CHECK_I(pipe_src_h);
8234 
8235 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8236 		if (current_config->pch_pfit.enabled) {
8237 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8238 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8239 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8240 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8241 		}
8242 
8243 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8244 		/* FIXME do the readout properly and get rid of this quirk */
8245 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8246 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8247 
8248 		PIPE_CONF_CHECK_X(gamma_mode);
8249 		if (IS_CHERRYVIEW(dev_priv))
8250 			PIPE_CONF_CHECK_X(cgm_mode);
8251 		else
8252 			PIPE_CONF_CHECK_X(csc_mode);
8253 		PIPE_CONF_CHECK_BOOL(gamma_enable);
8254 		PIPE_CONF_CHECK_BOOL(csc_enable);
8255 
8256 		PIPE_CONF_CHECK_I(linetime);
8257 		PIPE_CONF_CHECK_I(ips_linetime);
8258 
8259 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8260 		if (bp_gamma)
8261 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8262 	}
8263 
8264 	PIPE_CONF_CHECK_BOOL(double_wide);
8265 
8266 	PIPE_CONF_CHECK_P(shared_dpll);
8267 
8268 	/* FIXME do the readout properly and get rid of this quirk */
8269 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8270 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8271 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8272 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8273 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8274 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8275 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8276 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8277 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8278 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8279 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8280 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8281 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8282 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8283 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8284 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8285 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8286 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8287 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8288 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8289 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8290 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8291 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8292 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8293 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8294 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8295 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8296 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8297 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8298 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8299 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8300 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8301 
8302 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8303 		PIPE_CONF_CHECK_X(dsi_pll.div);
8304 
8305 		if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8306 			PIPE_CONF_CHECK_I(pipe_bpp);
8307 
8308 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8309 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8310 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8311 
8312 		PIPE_CONF_CHECK_I(min_voltage_level);
8313 	}
8314 
8315 	PIPE_CONF_CHECK_X(infoframes.enable);
8316 	PIPE_CONF_CHECK_X(infoframes.gcp);
8317 	PIPE_CONF_CHECK_INFOFRAME(avi);
8318 	PIPE_CONF_CHECK_INFOFRAME(spd);
8319 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
8320 	PIPE_CONF_CHECK_INFOFRAME(drm);
8321 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8322 
8323 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8324 	PIPE_CONF_CHECK_I(master_transcoder);
8325 	PIPE_CONF_CHECK_BOOL(bigjoiner);
8326 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8327 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8328 
8329 	PIPE_CONF_CHECK_I(dsc.compression_enable);
8330 	PIPE_CONF_CHECK_I(dsc.dsc_split);
8331 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8332 
8333 	PIPE_CONF_CHECK_BOOL(splitter.enable);
8334 	PIPE_CONF_CHECK_I(splitter.link_count);
8335 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8336 
8337 	PIPE_CONF_CHECK_I(mst_master_transcoder);
8338 
8339 	PIPE_CONF_CHECK_BOOL(vrr.enable);
8340 	PIPE_CONF_CHECK_I(vrr.vmin);
8341 	PIPE_CONF_CHECK_I(vrr.vmax);
8342 	PIPE_CONF_CHECK_I(vrr.flipline);
8343 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
8344 
8345 #undef PIPE_CONF_CHECK_X
8346 #undef PIPE_CONF_CHECK_I
8347 #undef PIPE_CONF_CHECK_BOOL
8348 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8349 #undef PIPE_CONF_CHECK_P
8350 #undef PIPE_CONF_CHECK_FLAGS
8351 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8352 #undef PIPE_CONF_CHECK_COLOR_LUT
8353 #undef PIPE_CONF_QUIRK
8354 
8355 	return ret;
8356 }
8357 
8358 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8359 					   const struct intel_crtc_state *pipe_config)
8360 {
8361 	if (pipe_config->has_pch_encoder) {
8362 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8363 							    &pipe_config->fdi_m_n);
8364 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8365 
8366 		/*
8367 		 * FDI already provided one idea for the dotclock.
8368 		 * Yell if the encoder disagrees.
8369 		 */
8370 		drm_WARN(&dev_priv->drm,
8371 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8372 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8373 			 fdi_dotclock, dotclock);
8374 	}
8375 }
8376 
/*
 * Cross-check the computed SKL+ watermark and DDB allocation state for
 * @crtc against what is actually programmed in the hardware, reporting
 * any mismatch with drm_err(). No-op below display version 9 or when
 * the crtc is inactive.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Scratch space for the hw readout; heap-allocated to keep it off the stack. */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return; /* best effort: silently skip verification on OOM */

	/* Read back the watermark and DDB state from the hardware. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* DBUF slice tracking only exists on display version 11+. */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks: compare every enabled level, sw vs. hw. */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* Transition watermark is checked separately from the levels. */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB: only the Y/primary allocation is verified here. */
		hw_ddb_entry = &hw->ddb_y[plane->id];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
8464 
/*
 * Verify every connector in @state whose new state binds it to @crtc:
 * its crtc state must pass intel_connector_verify_state() and its atomic
 * best_encoder must match the legacy connector->encoder pointer.
 *
 * @crtc may be NULL (see intel_modeset_verify_disabled()); then only
 * connectors not bound to any crtc are checked. NOTE(review): this
 * presumes &crtc->base compares equal to NULL when crtc is NULL (base
 * being the first member) — confirm against the struct layout.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
8489 
/*
 * For every encoder on the device, cross-check the connector state in
 * @state against the encoder's software and hardware state: connectors
 * must agree on the crtc, the encoder's enabled tracking must match
 * whether any connector uses it, and a detached encoder must be off in
 * hardware. Encoders not referenced by any connector in @state are
 * skipped entirely.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* enabled: some connector uses it after the commit; found: it appears in @state at all */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoder untouched by this state: nothing to verify. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* Detached encoder must also be disabled in hardware. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
8538 
/*
 * Read the crtc's configuration back from the hardware and compare it
 * against the software state we just committed (@new_crtc_state),
 * warning on every mismatch.
 *
 * NOTE: @old_crtc_state is no longer needed after the commit, so it is
 * destroyed here and recycled as scratch storage (@pipe_config) for the
 * hardware readout. Do not use the old state after this function.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master = crtc;

	/* Reset the old state to a clean slate, preserving its uapi.state link. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Bigjoiner slaves have no encoders of their own; check the master's. */
	if (new_crtc_state->bigjoiner_slave)
		master = new_crtc_state->bigjoiner_linked_crtc;

	for_each_encoder_on_crtc(dev, &master->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's readout into the scratch hw state. */
		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* The full state comparison only makes sense for an active crtc. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
8610 
8611 static void
8612 intel_verify_planes(struct intel_atomic_state *state)
8613 {
8614 	struct intel_plane *plane;
8615 	const struct intel_plane_state *plane_state;
8616 	int i;
8617 
8618 	for_each_new_intel_plane_in_state(state, plane,
8619 					  plane_state, i)
8620 		assert_plane(plane, plane_state->planar_slave ||
8621 			     plane_state->uapi.visible);
8622 }
8623 
/*
 * Verify the software tracking of one shared DPLL against its hardware
 * state. With @crtc/@new_crtc_state non-NULL, additionally checks that
 * the crtc's pipe is (or isn't) correctly accounted in the pll's active
 * and enabled pipe masks; with @crtc NULL only the global bookkeeping
 * is checked (see verify_disabled_dpll_state()).
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	/* Zero so the later memcmp() against sw state is meaningful. */
	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs have no meaningful on/off tracking to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* active pipes must be a subset of the pipes holding a reference */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* An active crtc must be in the active mask, an inactive one must not. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* If the pll is on, its cached hw state must match the readout exactly. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
8678 
8679 static void
8680 verify_shared_dpll_state(struct intel_crtc *crtc,
8681 			 struct intel_crtc_state *old_crtc_state,
8682 			 struct intel_crtc_state *new_crtc_state)
8683 {
8684 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8685 
8686 	if (new_crtc_state->shared_dpll)
8687 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8688 
8689 	if (old_crtc_state->shared_dpll &&
8690 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8691 		u8 pipe_mask = BIT(crtc->pipe);
8692 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8693 
8694 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8695 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
8696 				pipe_name(crtc->pipe), pll->active_mask);
8697 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
8698 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
8699 				pipe_name(crtc->pipe), pll->state.pipe_mask);
8700 	}
8701 }
8702 
8703 static void
8704 intel_modeset_verify_crtc(struct intel_crtc *crtc,
8705 			  struct intel_atomic_state *state,
8706 			  struct intel_crtc_state *old_crtc_state,
8707 			  struct intel_crtc_state *new_crtc_state)
8708 {
8709 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
8710 		return;
8711 
8712 	verify_wm_state(crtc, new_crtc_state);
8713 	verify_connector_state(state, crtc);
8714 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
8715 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
8716 }
8717 
8718 static void
8719 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
8720 {
8721 	int i;
8722 
8723 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
8724 		verify_single_dpll_state(dev_priv,
8725 					 &dev_priv->dpll.shared_dplls[i],
8726 					 NULL, NULL);
8727 }
8728 
/*
 * Verify the parts of the hardware state not tied to any specific crtc:
 * encoder tracking, connectors bound to no crtc (NULL), and the shared
 * DPLL bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
8737 
/*
 * Update the crtc's vblank timestamping constants and scanline offset
 * from the committed state. Works on a local copy of the adjusted mode
 * so the VRR overrides below don't leak back into @crtc_state.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	/* With VRR the effective vertical timings stretch out to vmax. */
	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_DISPLAY_VER(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		/* Interlaced modes count field lines, i.e. half of vtotal. */
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
8799 
8800 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
8801 {
8802 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8803 	struct intel_crtc_state *new_crtc_state;
8804 	struct intel_crtc *crtc;
8805 	int i;
8806 
8807 	if (!dev_priv->display.crtc_compute_clock)
8808 		return;
8809 
8810 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8811 		if (!intel_crtc_needs_modeset(new_crtc_state))
8812 			continue;
8813 
8814 		intel_release_shared_dplls(state, crtc);
8815 	}
8816 }
8817 
8818 /*
8819  * This implements the workaround described in the "notes" section of the mode
8820  * set sequence documentation. When going from no pipes or single pipe to
8821  * multiple pipes, and planes are enabled after the pipe, we need to wait at
8822  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
8823  */
8824 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
8825 {
8826 	struct intel_crtc_state *crtc_state;
8827 	struct intel_crtc *crtc;
8828 	struct intel_crtc_state *first_crtc_state = NULL;
8829 	struct intel_crtc_state *other_crtc_state = NULL;
8830 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
8831 	int i;
8832 
8833 	/* look at all crtc's that are going to be enabled in during modeset */
8834 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8835 		if (!crtc_state->hw.active ||
8836 		    !intel_crtc_needs_modeset(crtc_state))
8837 			continue;
8838 
8839 		if (first_crtc_state) {
8840 			other_crtc_state = crtc_state;
8841 			break;
8842 		} else {
8843 			first_crtc_state = crtc_state;
8844 			first_pipe = crtc->pipe;
8845 		}
8846 	}
8847 
8848 	/* No workaround needed? */
8849 	if (!first_crtc_state)
8850 		return 0;
8851 
8852 	/* w/a possibly needed, check how many crtc's are already enabled. */
8853 	for_each_intel_crtc(state->base.dev, crtc) {
8854 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
8855 		if (IS_ERR(crtc_state))
8856 			return PTR_ERR(crtc_state);
8857 
8858 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
8859 
8860 		if (!crtc_state->hw.active ||
8861 		    intel_crtc_needs_modeset(crtc_state))
8862 			continue;
8863 
8864 		/* 2 or more enabled crtcs means no need for w/a */
8865 		if (enabled_pipe != INVALID_PIPE)
8866 			return 0;
8867 
8868 		enabled_pipe = crtc->pipe;
8869 	}
8870 
8871 	if (enabled_pipe != INVALID_PIPE)
8872 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
8873 	else if (other_crtc_state)
8874 		other_crtc_state->hsw_workaround_pipe = first_pipe;
8875 
8876 	return 0;
8877 }
8878 
8879 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
8880 			   u8 active_pipes)
8881 {
8882 	const struct intel_crtc_state *crtc_state;
8883 	struct intel_crtc *crtc;
8884 	int i;
8885 
8886 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8887 		if (crtc_state->hw.active)
8888 			active_pipes |= BIT(crtc->pipe);
8889 		else
8890 			active_pipes &= ~BIT(crtc->pipe);
8891 	}
8892 
8893 	return active_pipes;
8894 }
8895 
8896 static int intel_modeset_checks(struct intel_atomic_state *state)
8897 {
8898 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8899 
8900 	state->modeset = true;
8901 
8902 	if (IS_HASWELL(dev_priv))
8903 		return hsw_mode_set_planes_workaround(state);
8904 
8905 	return 0;
8906 }
8907 
8908 /*
8909  * Handle calculation of various watermark data at the end of the atomic check
8910  * phase.  The code here should be run after the per-crtc and per-plane 'check'
8911  * handlers to ensure that all derived state has been updated.
8912  */
8913 static int calc_watermark_data(struct intel_atomic_state *state)
8914 {
8915 	struct drm_device *dev = state->base.dev;
8916 	struct drm_i915_private *dev_priv = to_i915(dev);
8917 
8918 	/* Is there platform-specific watermark information to calculate? */
8919 	if (dev_priv->display.compute_global_watermarks)
8920 		return dev_priv->display.compute_global_watermarks(state);
8921 
8922 	return 0;
8923 }
8924 
8925 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
8926 				     struct intel_crtc_state *new_crtc_state)
8927 {
8928 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
8929 		return;
8930 
8931 	new_crtc_state->uapi.mode_changed = false;
8932 	new_crtc_state->update_pipe = true;
8933 }
8934 
/*
 * Carry over the "fuzzy" parts of the old state into the new one when
 * a fastset was chosen, so the hardware keeps its current programming.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	/*
	 * If we're not doing the full modeset we want to
	 * keep the current M/N values as they may be
	 * sufficiently different to the computed values
	 * to cause problems.
	 *
	 * FIXME: should really copy more fuzzy state here
	 */
	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
8951 
8952 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
8953 					  struct intel_crtc *crtc,
8954 					  u8 plane_ids_mask)
8955 {
8956 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8957 	struct intel_plane *plane;
8958 
8959 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8960 		struct intel_plane_state *plane_state;
8961 
8962 		if ((plane_ids_mask & BIT(plane->id)) == 0)
8963 			continue;
8964 
8965 		plane_state = intel_atomic_get_plane_state(state, plane);
8966 		if (IS_ERR(plane_state))
8967 			return PTR_ERR(plane_state);
8968 	}
8969 
8970 	return 0;
8971 }
8972 
8973 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
8974 				     struct intel_crtc *crtc)
8975 {
8976 	const struct intel_crtc_state *old_crtc_state =
8977 		intel_atomic_get_old_crtc_state(state, crtc);
8978 	const struct intel_crtc_state *new_crtc_state =
8979 		intel_atomic_get_new_crtc_state(state, crtc);
8980 
8981 	return intel_crtc_add_planes_to_state(state, crtc,
8982 					      old_crtc_state->enabled_planes |
8983 					      new_crtc_state->enabled_planes);
8984 }
8985 
8986 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
8987 {
8988 	/* See {hsw,vlv,ivb}_plane_ratio() */
8989 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
8990 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8991 		IS_IVYBRIDGE(dev_priv);
8992 }
8993 
8994 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
8995 					   struct intel_crtc *crtc,
8996 					   struct intel_crtc *other)
8997 {
8998 	const struct intel_plane_state *plane_state;
8999 	struct intel_plane *plane;
9000 	u8 plane_ids = 0;
9001 	int i;
9002 
9003 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9004 		if (plane->pipe == crtc->pipe)
9005 			plane_ids |= BIT(plane->id);
9006 	}
9007 
9008 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
9009 }
9010 
9011 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9012 {
9013 	const struct intel_crtc_state *crtc_state;
9014 	struct intel_crtc *crtc;
9015 	int i;
9016 
9017 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9018 		int ret;
9019 
9020 		if (!crtc_state->bigjoiner)
9021 			continue;
9022 
9023 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9024 						      crtc_state->bigjoiner_linked_crtc);
9025 		if (ret)
9026 			return ret;
9027 	}
9028 
9029 	return 0;
9030 }
9031 
/*
 * Plane-level atomic checks: add linked (NV12/bigjoiner) planes to the
 * state, run every plane's own check, then the per-crtc NV12 check, and
 * finally pull extra planes into the state where the active-plane count
 * influences the minimum cdclk. Ordering matters: planes must be added
 * before their checks can run.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count towards the plane ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
9088 
/*
 * Decide whether a full cdclk recomputation is needed for this commit,
 * setting *@need_cdclk_calc accordingly: per-plane minimum cdclk
 * changes, a changed forced minimum, or a bandwidth-driven minimum
 * exceeding the current per-pipe minimums all trigger it.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* A changed forced minimum always requires recomputation. */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Without both states in the commit there is nothing to compare. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		/* Running maximum of the per-pipe minimum cdclks seen so far. */
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
9141 
9142 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9143 {
9144 	struct intel_crtc_state *crtc_state;
9145 	struct intel_crtc *crtc;
9146 	int i;
9147 
9148 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9149 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9150 		int ret;
9151 
9152 		ret = intel_crtc_atomic_check(state, crtc);
9153 		if (ret) {
9154 			drm_dbg_atomic(&i915->drm,
9155 				       "[CRTC:%d:%s] atomic driver check failed\n",
9156 				       crtc->base.base.id, crtc->base.name);
9157 			return ret;
9158 		}
9159 	}
9160 
9161 	return 0;
9162 }
9163 
9164 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9165 					       u8 transcoders)
9166 {
9167 	const struct intel_crtc_state *new_crtc_state;
9168 	struct intel_crtc *crtc;
9169 	int i;
9170 
9171 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9172 		if (new_crtc_state->hw.enable &&
9173 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9174 		    intel_crtc_needs_modeset(new_crtc_state))
9175 			return true;
9176 	}
9177 
9178 	return false;
9179 }
9180 
/*
 * Resolve the bigjoiner master/slave pairing for @crtc during atomic
 * check. A master claims the next pipe (crtc->pipe + 1) as its slave;
 * conflicting claims (slave still owned by a master, or slave enabled
 * as a normal crtc) fail with -EINVAL via the "claimed" path.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave, *master;

	/* slave being enabled: is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave = crtc;
		master = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	if (!new_crtc_state->bigjoiner)
		return 0;

	/* The slave is always the next pipe, which must exist. */
	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	slave = new_crtc_state->bigjoiner_linked_crtc =
		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
	master = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave->base.base.id, slave->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave->base.base.id, slave->base.name,
		      master->base.base.id, master->base.name);
	return -EINVAL;
}
9232 
9233 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9234 				 struct intel_crtc_state *master_crtc_state)
9235 {
9236 	struct intel_crtc_state *slave_crtc_state =
9237 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9238 
9239 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9240 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9241 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9242 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9243 }
9244 
9245 /**
9246  * DOC: asynchronous flip implementation
9247  *
9248  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9249  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9250  * Correspondingly, support is currently added for primary plane only.
9251  *
9252  * Async flip can only change the plane surface address, so anything else
9253  * changing is rejected from the intel_atomic_check_async() function.
9254  * Once this check is cleared, flip done interrupt is enabled using
9255  * the intel_crtc_enable_flip_done() function.
9256  *
9257  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
9259  * handler itself. The timestamp and sequence sent during the flip done event
9260  * correspond to the last vblank and have no relation to the actual time when
9261  * the flip done event was sent.
9262  */
9263 static int intel_atomic_check_async(struct intel_atomic_state *state)
9264 {
9265 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9266 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9267 	const struct intel_plane_state *new_plane_state, *old_plane_state;
9268 	struct intel_crtc *crtc;
9269 	struct intel_plane *plane;
9270 	int i;
9271 
9272 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9273 					    new_crtc_state, i) {
9274 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9275 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
9276 			return -EINVAL;
9277 		}
9278 
9279 		if (!new_crtc_state->hw.active) {
9280 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9281 			return -EINVAL;
9282 		}
9283 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9284 			drm_dbg_kms(&i915->drm,
9285 				    "Active planes cannot be changed during async flip\n");
9286 			return -EINVAL;
9287 		}
9288 	}
9289 
9290 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9291 					     new_plane_state, i) {
9292 		/*
9293 		 * TODO: Async flip is only supported through the page flip IOCTL
9294 		 * as of now. So support currently added for primary plane only.
9295 		 * Support for other planes on platforms on which supports
9296 		 * this(vlv/chv and icl+) should be added when async flip is
9297 		 * enabled in the atomic IOCTL path.
9298 		 */
9299 		if (!plane->async_flip)
9300 			return -EINVAL;
9301 
9302 		/*
9303 		 * FIXME: This check is kept generic for all platforms.
9304 		 * Need to verify this for all gen9 and gen10 platforms to enable
9305 		 * this selectively if required.
9306 		 */
9307 		switch (new_plane_state->hw.fb->modifier) {
9308 		case I915_FORMAT_MOD_X_TILED:
9309 		case I915_FORMAT_MOD_Y_TILED:
9310 		case I915_FORMAT_MOD_Yf_TILED:
9311 			break;
9312 		default:
9313 			drm_dbg_kms(&i915->drm,
9314 				    "Linear memory/CCS does not support async flips\n");
9315 			return -EINVAL;
9316 		}
9317 
9318 		if (old_plane_state->view.color_plane[0].stride !=
9319 		    new_plane_state->view.color_plane[0].stride) {
9320 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9321 			return -EINVAL;
9322 		}
9323 
9324 		if (old_plane_state->hw.fb->modifier !=
9325 		    new_plane_state->hw.fb->modifier) {
9326 			drm_dbg_kms(&i915->drm,
9327 				    "Framebuffer modifiers cannot be changed in async flip\n");
9328 			return -EINVAL;
9329 		}
9330 
9331 		if (old_plane_state->hw.fb->format !=
9332 		    new_plane_state->hw.fb->format) {
9333 			drm_dbg_kms(&i915->drm,
9334 				    "Framebuffer format cannot be changed in async flip\n");
9335 			return -EINVAL;
9336 		}
9337 
9338 		if (old_plane_state->hw.rotation !=
9339 		    new_plane_state->hw.rotation) {
9340 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9341 			return -EINVAL;
9342 		}
9343 
9344 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9345 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9346 			drm_dbg_kms(&i915->drm,
9347 				    "Plane size/co-ordinates cannot be changed in async flip\n");
9348 			return -EINVAL;
9349 		}
9350 
9351 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9352 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9353 			return -EINVAL;
9354 		}
9355 
9356 		if (old_plane_state->hw.pixel_blend_mode !=
9357 		    new_plane_state->hw.pixel_blend_mode) {
9358 			drm_dbg_kms(&i915->drm,
9359 				    "Pixel blend mode cannot be changed in async flip\n");
9360 			return -EINVAL;
9361 		}
9362 
9363 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9364 			drm_dbg_kms(&i915->drm,
9365 				    "Color encoding cannot be changed in async flip\n");
9366 			return -EINVAL;
9367 		}
9368 
9369 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9370 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9371 			return -EINVAL;
9372 		}
9373 	}
9374 
9375 	return 0;
9376 }
9377 
/*
 * Pull every crtc linked via bigjoiner into the atomic state, propagate
 * a needed modeset to the linked crtc (plus its connectors and planes),
 * and finally tear down any pre-existing bigjoiner link on masters that
 * are being modeset — the link may be re-established later during the
 * compute phase.
 *
 * Returns 0 on success or a negative error code (e.g. -EDEADLK from
 * crtc locking) on failure.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* Lock the linked crtc and add its state to the commit. */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		/* A modeset on one end of the link forces one on the other. */
		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
9421 
9422 /**
9423  * intel_atomic_check - validate state object
9424  * @dev: drm device
9425  * @_state: state to validate
9426  */
9427 static int intel_atomic_check(struct drm_device *dev,
9428 			      struct drm_atomic_state *_state)
9429 {
9430 	struct drm_i915_private *dev_priv = to_i915(dev);
9431 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
9432 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9433 	struct intel_crtc *crtc;
9434 	int ret, i;
9435 	bool any_ms = false;
9436 
9437 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9438 					    new_crtc_state, i) {
9439 		if (new_crtc_state->inherited != old_crtc_state->inherited)
9440 			new_crtc_state->uapi.mode_changed = true;
9441 	}
9442 
9443 	intel_vrr_check_modeset(state);
9444 
9445 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
9446 	if (ret)
9447 		goto fail;
9448 
9449 	ret = intel_bigjoiner_add_affected_crtcs(state);
9450 	if (ret)
9451 		goto fail;
9452 
9453 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9454 					    new_crtc_state, i) {
9455 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
9456 			/* Light copy */
9457 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9458 
9459 			continue;
9460 		}
9461 
9462 		if (!new_crtc_state->uapi.enable) {
9463 			if (!new_crtc_state->bigjoiner_slave) {
9464 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9465 				any_ms = true;
9466 			}
9467 			continue;
9468 		}
9469 
9470 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9471 		if (ret)
9472 			goto fail;
9473 
9474 		ret = intel_modeset_pipe_config(state, new_crtc_state);
9475 		if (ret)
9476 			goto fail;
9477 
9478 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9479 						   new_crtc_state);
9480 		if (ret)
9481 			goto fail;
9482 	}
9483 
9484 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9485 					    new_crtc_state, i) {
9486 		if (!intel_crtc_needs_modeset(new_crtc_state))
9487 			continue;
9488 
9489 		ret = intel_modeset_pipe_config_late(new_crtc_state);
9490 		if (ret)
9491 			goto fail;
9492 
9493 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9494 	}
9495 
9496 	/**
9497 	 * Check if fastset is allowed by external dependencies like other
9498 	 * pipes and transcoders.
9499 	 *
9500 	 * Right now it only forces a fullmodeset when the MST master
9501 	 * transcoder did not changed but the pipe of the master transcoder
9502 	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
9503 	 * in case of port synced crtcs, if one of the synced crtcs
9504 	 * needs a full modeset, all other synced crtcs should be
9505 	 * forced a full modeset.
9506 	 */
9507 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9508 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9509 			continue;
9510 
9511 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9512 			enum transcoder master = new_crtc_state->mst_master_transcoder;
9513 
9514 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9515 				new_crtc_state->uapi.mode_changed = true;
9516 				new_crtc_state->update_pipe = false;
9517 			}
9518 		}
9519 
9520 		if (is_trans_port_sync_mode(new_crtc_state)) {
9521 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
9522 
9523 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9524 				trans |= BIT(new_crtc_state->master_transcoder);
9525 
9526 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
9527 				new_crtc_state->uapi.mode_changed = true;
9528 				new_crtc_state->update_pipe = false;
9529 			}
9530 		}
9531 
9532 		if (new_crtc_state->bigjoiner) {
9533 			struct intel_crtc_state *linked_crtc_state =
9534 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9535 
9536 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
9537 				new_crtc_state->uapi.mode_changed = true;
9538 				new_crtc_state->update_pipe = false;
9539 			}
9540 		}
9541 	}
9542 
9543 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9544 					    new_crtc_state, i) {
9545 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9546 			any_ms = true;
9547 			continue;
9548 		}
9549 
9550 		if (!new_crtc_state->update_pipe)
9551 			continue;
9552 
9553 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9554 	}
9555 
9556 	if (any_ms && !check_digital_port_conflicts(state)) {
9557 		drm_dbg_kms(&dev_priv->drm,
9558 			    "rejecting conflicting digital port configuration\n");
9559 		ret = -EINVAL;
9560 		goto fail;
9561 	}
9562 
9563 	ret = drm_dp_mst_atomic_check(&state->base);
9564 	if (ret)
9565 		goto fail;
9566 
9567 	ret = intel_atomic_check_planes(state);
9568 	if (ret)
9569 		goto fail;
9570 
9571 	intel_fbc_choose_crtc(dev_priv, state);
9572 	ret = calc_watermark_data(state);
9573 	if (ret)
9574 		goto fail;
9575 
9576 	ret = intel_bw_atomic_check(state);
9577 	if (ret)
9578 		goto fail;
9579 
9580 	ret = intel_atomic_check_cdclk(state, &any_ms);
9581 	if (ret)
9582 		goto fail;
9583 
9584 	if (any_ms) {
9585 		ret = intel_modeset_checks(state);
9586 		if (ret)
9587 			goto fail;
9588 
9589 		ret = intel_modeset_calc_cdclk(state);
9590 		if (ret)
9591 			return ret;
9592 
9593 		intel_modeset_clear_plls(state);
9594 	}
9595 
9596 	ret = intel_atomic_check_crtcs(state);
9597 	if (ret)
9598 		goto fail;
9599 
9600 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9601 					    new_crtc_state, i) {
9602 		if (new_crtc_state->uapi.async_flip) {
9603 			ret = intel_atomic_check_async(state);
9604 			if (ret)
9605 				goto fail;
9606 		}
9607 
9608 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
9609 		    !new_crtc_state->update_pipe)
9610 			continue;
9611 
9612 		intel_dump_pipe_config(new_crtc_state, state,
9613 				       intel_crtc_needs_modeset(new_crtc_state) ?
9614 				       "[modeset]" : "[fastset]");
9615 	}
9616 
9617 	return 0;
9618 
9619  fail:
9620 	if (ret == -EDEADLK)
9621 		return ret;
9622 
9623 	/*
9624 	 * FIXME would probably be nice to know which crtc specifically
9625 	 * caused the failure, in cases where we can pinpoint it.
9626 	 */
9627 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9628 					    new_crtc_state, i)
9629 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
9630 
9631 	return ret;
9632 }
9633 
9634 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
9635 {
9636 	struct intel_crtc_state *crtc_state;
9637 	struct intel_crtc *crtc;
9638 	int i, ret;
9639 
9640 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
9641 	if (ret < 0)
9642 		return ret;
9643 
9644 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9645 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9646 
9647 		if (mode_changed || crtc_state->update_pipe ||
9648 		    crtc_state->uapi.color_mgmt_changed) {
9649 			intel_dsb_prepare(crtc_state);
9650 		}
9651 	}
9652 
9653 	return 0;
9654 }
9655 
9656 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
9657 				  struct intel_crtc_state *crtc_state)
9658 {
9659 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9660 
9661 	if (!IS_DISPLAY_VER(dev_priv, 2) || crtc_state->active_planes)
9662 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
9663 
9664 	if (crtc_state->has_pch_encoder) {
9665 		enum pipe pch_transcoder =
9666 			intel_crtc_pch_transcoder(crtc);
9667 
9668 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
9669 	}
9670 }
9671 
/*
 * Reprogram the pipe-level state that may legally change during a
 * fastset (no full modeset): pipe source size, panel fitter/scalers,
 * linetime watermark and pipe chicken bits. The programming order
 * below is deliberate — do not reorder without checking the per-step
 * comments.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-gen9 PCH platforms: enable or disable the panel fitter. */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
9716 
/*
 * Commit the pipe-level configuration for one crtc. Called from
 * intel_update_crtc() between intel_pipe_update_start()/end(), i.e.
 * inside the vblank evasion critical section, so this must complete
 * quickly. During a full modeset the pipe was already programmed by
 * the crtc enable path, so only the fastset pieces are written here.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9)
			skl_detach_scalers(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);

		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
	}

	/* Watermarks are updated for both modesets and fastsets. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state, crtc);
}
9751 
9752 static void intel_enable_crtc(struct intel_atomic_state *state,
9753 			      struct intel_crtc *crtc)
9754 {
9755 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9756 	const struct intel_crtc_state *new_crtc_state =
9757 		intel_atomic_get_new_crtc_state(state, crtc);
9758 
9759 	if (!intel_crtc_needs_modeset(new_crtc_state))
9760 		return;
9761 
9762 	intel_crtc_update_active_timings(new_crtc_state);
9763 
9764 	dev_priv->display.crtc_enable(state, crtc);
9765 
9766 	if (new_crtc_state->bigjoiner_slave)
9767 		return;
9768 
9769 	/* vblanks work again, re-enable pipe CRC. */
9770 	intel_crtc_enable_pipe_crc(crtc);
9771 }
9772 
/*
 * Commit the new state for one crtc that remains (or becomes) active:
 * preload LUTs, run the pre-plane updates, then program the pipe config
 * and planes inside the vblank evasion critical section.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* Fastset-only preparation; modesets did this during crtc enable. */
	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	/* Disable FBC on a fastset that no longer allows it. */
	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_config(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
9822 
/*
 * Tear down one crtc that is being disabled or fully modeset: planes
 * first, then pipe CRC, then the pipe itself, followed by FBC and the
 * shared DPLL. Must never be called for a bigjoiner slave directly —
 * the slave's planes are disabled here via its master's old state.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);

	intel_crtc_disable_planes(state, crtc);

	/*
	 * We still need special handling for disabling bigjoiner master
	 * and slaves since for slave we do not have encoder or plls
	 * so we dont need to disable those.
	 */
	if (old_crtc_state->bigjoiner) {
		intel_crtc_disable_planes(state,
					  old_crtc_state->bigjoiner_linked_crtc);
		old_crtc_state->bigjoiner_linked_crtc->active = false;
	}

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display.crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv) &&
	    dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
}
9862 
/*
 * Disable every crtc that needs a full modeset. Done in two passes:
 * first port sync / MST slaves (they must go down before their
 * masters), then everything else. The 'handled' pipe mask prevents
 * the second pass from disabling a crtc twice. Bigjoiner slaves are
 * never disabled directly; their master's disable handles them.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state))
			continue;

		intel_pre_plane_update(state, crtc);
		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)) ||
		    old_crtc_state->bigjoiner_slave)
			continue;

		intel_pre_plane_update(state, crtc);
		/* Bigjoiner masters also run pre-plane updates for their slave. */
		if (old_crtc_state->bigjoiner) {
			struct intel_crtc *slave =
				old_crtc_state->bigjoiner_linked_crtc;

			intel_pre_plane_update(state, slave);
		}

		if (old_crtc_state->hw.active)
			intel_old_crtc_state_disables(state, old_crtc_state,
						      new_crtc_state, crtc);
	}
}
9915 
9916 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
9917 {
9918 	struct intel_crtc_state *new_crtc_state;
9919 	struct intel_crtc *crtc;
9920 	int i;
9921 
9922 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9923 		if (!new_crtc_state->hw.active)
9924 			continue;
9925 
9926 		intel_enable_crtc(state, crtc);
9927 		intel_update_crtc(state, crtc);
9928 	}
9929 }
9930 
/*
 * skl+ variant of the modeset enable/update step. DDB allocations of
 * different pipes must never overlap between successive updates, so
 * pipes are committed in an order that keeps every intermediate
 * allocation disjoint (tracked via 'entries'), waiting for a vblank
 * where a pipe's new allocation must land before another can use the
 * freed space.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Split the active pipes into fastset (update) and modeset sets. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Defer this pipe while its new ddb would overlap another. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Dependent pipes are enabled in the next loop. */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been committed by now. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
10052 
10053 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10054 {
10055 	struct intel_atomic_state *state, *next;
10056 	struct llist_node *freed;
10057 
10058 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10059 	llist_for_each_entry_safe(state, next, freed, freed)
10060 		drm_atomic_state_put(&state->base);
10061 }
10062 
/* Deferred-work entry point: free the atomic states queued on the free list. */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}
10070 
/*
 * Block until either the commit's sw fence signals or an
 * I915_RESET_MODESET GPU reset is flagged, whichever comes first.
 * Both wait queues are armed simultaneously on each loop iteration
 * so neither event can be missed while sleeping on the other.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		/* Recheck both conditions after arming the wait entries. */
		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
10097 
/*
 * Release the DSB resources prepared for each crtc of this commit
 * (see intel_atomic_prepare_commit / intel_dsb_prepare).
 */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}
10108 
/*
 * Commit-work handler that runs after the commit finished: free the
 * DSBs and plane resources, signal cleanup completion, drop the state
 * reference, and drain any states queued for deferred freeing. The
 * call order matters — the state must not be put before its planes
 * are cleaned up.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	intel_cleanup_dsbs(state);
	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	intel_atomic_helper_free_state(i915);
}
10122 
/*
 * For every plane whose fb uses the gen12 render-compressed modifier
 * with clear color, read the 8-byte native clear color value (located
 * at offset 16 within the fb's plane #2) into plane_state->ccval.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int ret;

		if (!fb ||
		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[2] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
10159 
/*
 * Apply a swapped-in atomic state to the hardware.
 *
 * Runs either inline (for blocking commits) or from the commit work item
 * (for nonblocking commits). The sequence below is carefully ordered
 * around vblanks, power domains and watermark programming - do not
 * rearrange steps casually.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	/*
	 * Grab the power domains needed by each pipe being reconfigured;
	 * they are released again after the update further below.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_disable_flip_done(state, crtc);

		if (new_crtc_state->hw.active &&
		    !intel_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_DISPLAY_VER(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
10337 
10338 static void intel_atomic_commit_work(struct work_struct *work)
10339 {
10340 	struct intel_atomic_state *state =
10341 		container_of(work, struct intel_atomic_state, base.commit_work);
10342 
10343 	intel_atomic_commit_tail(state);
10344 }
10345 
10346 static int __i915_sw_fence_call
10347 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10348 			  enum i915_sw_fence_notify notify)
10349 {
10350 	struct intel_atomic_state *state =
10351 		container_of(fence, struct intel_atomic_state, commit_ready);
10352 
10353 	switch (notify) {
10354 	case FENCE_COMPLETE:
10355 		/* we do blocking waits in the worker, nothing to do here */
10356 		break;
10357 	case FENCE_FREE:
10358 		{
10359 			struct intel_atomic_helper *helper =
10360 				&to_i915(state->base.dev)->atomic_helper;
10361 
10362 			if (llist_add(&state->freed, &helper->free_list))
10363 				schedule_work(&helper->free_work);
10364 			break;
10365 		}
10366 	}
10367 
10368 	return NOTIFY_DONE;
10369 }
10370 
/*
 * Move each plane's frontbuffer tracking bit from the old framebuffer's
 * frontbuffer object to the new one's, so tracking follows the flip.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}
10383 
/*
 * Main atomic commit entry point: validates/prepares and swaps in the new
 * state, then runs the commit tail either inline (blocking commits) or
 * from a worker (nonblocking commits).
 *
 * Returns 0 on success or a negative error code; on failure all
 * references and the runtime PM wakeref taken here are released again.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		/* Unwind: release DSBs, plane prep and the PM wakeref. */
		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Reference for the commit tail; dropped by the cleanup work. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking commit: drain previously queued modesets first. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
10474 
/*
 * Deferred RPS boost bookkeeping: queued on a CRTC's vblank waitqueue by
 * add_rps_boost_after_vblank(), consumed and freed by do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the vblank waitqueue */

	struct drm_crtc *crtc;		/* CRTC whose vblank reference we hold */
	struct i915_request *request;	/* request to boost; we hold a reference */
};
10481 
10482 static int do_rps_boost(struct wait_queue_entry *_wait,
10483 			unsigned mode, int sync, void *key)
10484 {
10485 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10486 	struct i915_request *rq = wait->request;
10487 
10488 	/*
10489 	 * If we missed the vblank, but the request is already running it
10490 	 * is reasonable to assume that it will complete before the next
10491 	 * vblank without our intervention, so leave RPS alone.
10492 	 */
10493 	if (!i915_request_started(rq))
10494 		intel_rps_boost(rq);
10495 	i915_request_put(rq);
10496 
10497 	drm_crtc_vblank_put(wait->crtc);
10498 
10499 	list_del(&wait->wait.entry);
10500 	kfree(wait);
10501 	return 1;
10502 }
10503 
10504 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10505 				       struct dma_fence *fence)
10506 {
10507 	struct wait_rps_boost *wait;
10508 
10509 	if (!dma_fence_is_i915(fence))
10510 		return;
10511 
10512 	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10513 		return;
10514 
10515 	if (drm_crtc_vblank_get(crtc))
10516 		return;
10517 
10518 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10519 	if (!wait) {
10520 		drm_crtc_vblank_put(crtc);
10521 		return;
10522 	}
10523 
10524 	wait->request = to_request(dma_fence_get(fence));
10525 	wait->crtc = crtc;
10526 
10527 	wait->wait.func = do_rps_boost;
10528 	wait->wait.flags = 0;
10529 
10530 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10531 }
10532 
10533 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10534 {
10535 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10536 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10537 	struct drm_framebuffer *fb = plane_state->hw.fb;
10538 	struct i915_vma *vma;
10539 	bool phys_cursor =
10540 		plane->id == PLANE_CURSOR &&
10541 		INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10542 
10543 	vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10544 					 &plane_state->view.gtt,
10545 					 intel_plane_uses_fence(plane_state),
10546 					 &plane_state->flags);
10547 	if (IS_ERR(vma))
10548 		return PTR_ERR(vma);
10549 
10550 	plane_state->vma = vma;
10551 
10552 	return 0;
10553 }
10554 
10555 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10556 {
10557 	struct i915_vma *vma;
10558 
10559 	vma = fetch_and_zero(&old_plane_state->vma);
10560 	if (vma)
10561 		intel_unpin_fb_vma(vma, old_plane_state->flags);
10562 }
10563 
10564 /**
10565  * intel_prepare_plane_fb - Prepare fb for usage on plane
10566  * @_plane: drm plane to prepare for
10567  * @_new_plane_state: the plane state being prepared
10568  *
10569  * Prepares a framebuffer for usage on a display plane.  Generally this
10570  * involves pinning the underlying object and updating the frontbuffer tracking
10571  * bits.  Some older platforms need special physical address handling for
10572  * cursor planes.
10573  *
10574  * Returns 0 on success, negative error code on failure.
10575  */
10576 int
10577 intel_prepare_plane_fb(struct drm_plane *_plane,
10578 		       struct drm_plane_state *_new_plane_state)
10579 {
10580 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
10581 	struct intel_plane *plane = to_intel_plane(_plane);
10582 	struct intel_plane_state *new_plane_state =
10583 		to_intel_plane_state(_new_plane_state);
10584 	struct intel_atomic_state *state =
10585 		to_intel_atomic_state(new_plane_state->uapi.state);
10586 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10587 	const struct intel_plane_state *old_plane_state =
10588 		intel_atomic_get_old_plane_state(state, plane);
10589 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
10590 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
10591 	int ret;
10592 
10593 	if (old_obj) {
10594 		const struct intel_crtc_state *crtc_state =
10595 			intel_atomic_get_new_crtc_state(state,
10596 							to_intel_crtc(old_plane_state->hw.crtc));
10597 
10598 		/* Big Hammer, we also need to ensure that any pending
10599 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
10600 		 * current scanout is retired before unpinning the old
10601 		 * framebuffer. Note that we rely on userspace rendering
10602 		 * into the buffer attached to the pipe they are waiting
10603 		 * on. If not, userspace generates a GPU hang with IPEHR
10604 		 * point to the MI_WAIT_FOR_EVENT.
10605 		 *
10606 		 * This should only fail upon a hung GPU, in which case we
10607 		 * can safely continue.
10608 		 */
10609 		if (intel_crtc_needs_modeset(crtc_state)) {
10610 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
10611 							      old_obj->base.resv, NULL,
10612 							      false, 0,
10613 							      GFP_KERNEL);
10614 			if (ret < 0)
10615 				return ret;
10616 		}
10617 	}
10618 
10619 	if (new_plane_state->uapi.fence) { /* explicit fencing */
10620 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
10621 					     &attr);
10622 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
10623 						    new_plane_state->uapi.fence,
10624 						    i915_fence_timeout(dev_priv),
10625 						    GFP_KERNEL);
10626 		if (ret < 0)
10627 			return ret;
10628 	}
10629 
10630 	if (!obj)
10631 		return 0;
10632 
10633 
10634 	ret = intel_plane_pin_fb(new_plane_state);
10635 	if (ret)
10636 		return ret;
10637 
10638 	i915_gem_object_wait_priority(obj, 0, &attr);
10639 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
10640 
10641 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
10642 		struct dma_fence *fence;
10643 
10644 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
10645 						      obj->base.resv, NULL,
10646 						      false,
10647 						      i915_fence_timeout(dev_priv),
10648 						      GFP_KERNEL);
10649 		if (ret < 0)
10650 			goto unpin_fb;
10651 
10652 		fence = dma_resv_get_excl_rcu(obj->base.resv);
10653 		if (fence) {
10654 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10655 						   fence);
10656 			dma_fence_put(fence);
10657 		}
10658 	} else {
10659 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10660 					   new_plane_state->uapi.fence);
10661 	}
10662 
10663 	/*
10664 	 * We declare pageflips to be interactive and so merit a small bias
10665 	 * towards upclocking to deliver the frame on time. By only changing
10666 	 * the RPS thresholds to sample more regularly and aim for higher
10667 	 * clocks we can hopefully deliver low power workloads (like kodi)
10668 	 * that are not quite steady state without resorting to forcing
10669 	 * maximum clocks following a vblank miss (see do_rps_boost()).
10670 	 */
10671 	if (!state->rps_interactive) {
10672 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
10673 		state->rps_interactive = true;
10674 	}
10675 
10676 	return 0;
10677 
10678 unpin_fb:
10679 	intel_plane_unpin_fb(new_plane_state);
10680 
10681 	return ret;
10682 }
10683 
10684 /**
10685  * intel_cleanup_plane_fb - Cleans up an fb after plane use
10686  * @plane: drm plane to clean up for
10687  * @_old_plane_state: the state from the previous modeset
10688  *
10689  * Cleans up a framebuffer that has just been removed from a plane.
10690  */
10691 void
10692 intel_cleanup_plane_fb(struct drm_plane *plane,
10693 		       struct drm_plane_state *_old_plane_state)
10694 {
10695 	struct intel_plane_state *old_plane_state =
10696 		to_intel_plane_state(_old_plane_state);
10697 	struct intel_atomic_state *state =
10698 		to_intel_atomic_state(old_plane_state->uapi.state);
10699 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
10700 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
10701 
10702 	if (!obj)
10703 		return;
10704 
10705 	if (state->rps_interactive) {
10706 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
10707 		state->rps_interactive = false;
10708 	}
10709 
10710 	/* Should only be called after a successful intel_prepare_plane_fb()! */
10711 	intel_plane_unpin_fb(old_plane_state);
10712 }
10713 
10714 /**
10715  * intel_plane_destroy - destroy a plane
10716  * @plane: plane to destroy
10717  *
10718  * Common destruction function for all types of planes (primary, cursor,
10719  * sprite).
10720  */
10721 void intel_plane_destroy(struct drm_plane *plane)
10722 {
10723 	drm_plane_cleanup(plane);
10724 	kfree(to_intel_plane(plane));
10725 }
10726 
10727 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
10728 {
10729 	struct intel_plane *plane;
10730 
10731 	for_each_intel_plane(&dev_priv->drm, plane) {
10732 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
10733 								  plane->pipe);
10734 
10735 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
10736 	}
10737 }
10738 
10739 
10740 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
10741 				      struct drm_file *file)
10742 {
10743 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10744 	struct drm_crtc *drmmode_crtc;
10745 	struct intel_crtc *crtc;
10746 
10747 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
10748 	if (!drmmode_crtc)
10749 		return -ENOENT;
10750 
10751 	crtc = to_intel_crtc(drmmode_crtc);
10752 	pipe_from_crtc_id->pipe = crtc->pipe;
10753 
10754 	return 0;
10755 }
10756 
10757 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
10758 {
10759 	struct drm_device *dev = encoder->base.dev;
10760 	struct intel_encoder *source_encoder;
10761 	u32 possible_clones = 0;
10762 
10763 	for_each_intel_encoder(dev, source_encoder) {
10764 		if (encoders_cloneable(encoder, source_encoder))
10765 			possible_clones |= drm_encoder_mask(&source_encoder->base);
10766 	}
10767 
10768 	return possible_clones;
10769 }
10770 
10771 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
10772 {
10773 	struct drm_device *dev = encoder->base.dev;
10774 	struct intel_crtc *crtc;
10775 	u32 possible_crtcs = 0;
10776 
10777 	for_each_intel_crtc(dev, crtc) {
10778 		if (encoder->pipe_mask & BIT(crtc->pipe))
10779 			possible_crtcs |= drm_crtc_mask(&crtc->base);
10780 	}
10781 
10782 	return possible_crtcs;
10783 }
10784 
10785 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
10786 {
10787 	if (!IS_MOBILE(dev_priv))
10788 		return false;
10789 
10790 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
10791 		return false;
10792 
10793 	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
10794 		return false;
10795 
10796 	return true;
10797 }
10798 
10799 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
10800 {
10801 	if (DISPLAY_VER(dev_priv) >= 9)
10802 		return false;
10803 
10804 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
10805 		return false;
10806 
10807 	if (HAS_PCH_LPT_H(dev_priv) &&
10808 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
10809 		return false;
10810 
10811 	/* DDI E can't be used if DDI A requires 4 lanes */
10812 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
10813 		return false;
10814 
10815 	if (!dev_priv->vbt.int_crt_support)
10816 		return false;
10817 
10818 	return true;
10819 }
10820 
/*
 * Probe and register all display outputs (DDI, DP, HDMI, SDVO, LVDS, CRT,
 * DSI, TV, DVO) appropriate for this platform, then compute each
 * encoder's possible_crtcs/possible_clones masks. The platform branches
 * are ordered newest to oldest; within a branch, registration order can
 * matter (e.g. LVDS before CRT on PCH-split, see comment below).
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected. Later SKUs may or
		 * may not have it - it was supposed to be fixed by the same
		 * time we stopped using straps. Assume it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/*
		 * DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register.
		 */
		if (HAS_PCH_TGP(dev_priv)) {
			/* W/A due to lack of STRAP config on TGP PCH*/
			found = (SFUSE_STRAP_DDIB_DETECTED |
				 SFUSE_STRAP_DDIC_DETECTED |
				 SFUSE_STRAP_DDID_DETECTED);
		} else {
			found = intel_de_read(dev_priv, SFUSE_STRAP);
		}

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* With all encoders registered, fill in their routing constraints. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
11078 
11079 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11080 {
11081 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11082 
11083 	drm_framebuffer_cleanup(fb);
11084 	intel_frontbuffer_put(intel_fb->frontbuffer);
11085 
11086 	kfree(intel_fb);
11087 }
11088 
11089 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11090 						struct drm_file *file,
11091 						unsigned int *handle)
11092 {
11093 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11094 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
11095 
11096 	if (i915_gem_object_is_userptr(obj)) {
11097 		drm_dbg(&i915->drm,
11098 			"attempting to use a userptr for a framebuffer, denied\n");
11099 		return -EINVAL;
11100 	}
11101 
11102 	return drm_gem_handle_create(file, &obj->base, handle);
11103 }
11104 
11105 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11106 					struct drm_file *file,
11107 					unsigned flags, unsigned color,
11108 					struct drm_clip_rect *clips,
11109 					unsigned num_clips)
11110 {
11111 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11112 
11113 	i915_gem_object_flush_if_display(obj);
11114 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11115 
11116 	return 0;
11117 }
11118 
/* Callbacks plugged into every userspace-created framebuffer. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
11124 
/*
 * Validate an addfb request against the backing object's tiling and the
 * platform's format/stride limits, then register the fb with the DRM core.
 * Note: mode_cmd may be modified (modifier[0] is filled in for legacy
 * X-tiled addfb). Returns 0 on success or a negative error code.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fencing state under its lock. */
	i915_gem_object_lock(obj, NULL);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* Reject format+modifier combos no plane on this device can scan out. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (DISPLAY_VER(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks: single bo, aligned pitches, CCS aux strides. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All color planes must come from the same GEM object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* gen12 CCS aux planes have a fixed, derived pitch. */
		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Drop the frontbuffer reference taken at the top on any failure. */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
11266 
11267 static struct drm_framebuffer *
11268 intel_user_framebuffer_create(struct drm_device *dev,
11269 			      struct drm_file *filp,
11270 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
11271 {
11272 	struct drm_framebuffer *fb;
11273 	struct drm_i915_gem_object *obj;
11274 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
11275 
11276 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
11277 	if (!obj)
11278 		return ERR_PTR(-ENOENT);
11279 
11280 	fb = intel_framebuffer_create(obj, &mode_cmd);
11281 	i915_gem_object_put(obj);
11282 
11283 	return fb;
11284 }
11285 
/*
 * Device-wide mode validation: reject modes the transcoders can never
 * generate, independent of any particular connector or encoder.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int hdisplay_max, htotal_max;
	int vdisplay_max, vtotal_max;

	/*
	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
	 * of DBLSCAN modes to the output's mode list when they detect
	 * the scaling mode property on the connector. And they don't
	 * ask the kernel to validate those modes in any way until
	 * modeset time at which point the client gets a protocol error.
	 * So in order to not upset those clients we silently ignore the
	 * DBLSCAN flag on such connectors. For other connectors we will
	 * reject modes with the DBLSCAN flag in encoder->compute_config().
	 * And we always reject DBLSCAN modes in connector->mode_valid()
	 * as we never want such modes on the connector's mode list.
	 */

	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	/* Transcoder timing limits */
	if (DISPLAY_VER(dev_priv) >= 11) {
		hdisplay_max = 16384;
		vdisplay_max = 8192;
		htotal_max = 16384;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 9 ||
		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else if (DISPLAY_VER(dev_priv) >= 3) {
		hdisplay_max = 4096;
		vdisplay_max = 4096;
		htotal_max = 8192;
		vtotal_max = 8192;
	} else {
		hdisplay_max = 2048;
		vdisplay_max = 2048;
		htotal_max = 4096;
		vtotal_max = 4096;
	}

	/* hsync/hblank must fit inside htotal; likewise for vertical below. */
	if (mode->hdisplay > hdisplay_max ||
	    mode->hsync_start > htotal_max ||
	    mode->hsync_end > htotal_max ||
	    mode->htotal > htotal_max)
		return MODE_H_ILLEGAL;

	if (mode->vdisplay > vdisplay_max ||
	    mode->vsync_start > vtotal_max ||
	    mode->vsync_end > vtotal_max ||
	    mode->vtotal > vtotal_max)
		return MODE_V_ILLEGAL;

	/* Minimum active size and blanking requirements. */
	if (DISPLAY_VER(dev_priv) >= 5) {
		if (mode->hdisplay < 64 ||
		    mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 5)
			return MODE_V_ILLEGAL;
	} else {
		if (mode->htotal - mode->hdisplay < 32)
			return MODE_H_ILLEGAL;

		if (mode->vtotal - mode->vdisplay < 3)
			return MODE_V_ILLEGAL;
	}

	return MODE_OK;
}
11376 
11377 enum drm_mode_status
11378 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11379 				const struct drm_display_mode *mode,
11380 				bool bigjoiner)
11381 {
11382 	int plane_width_max, plane_height_max;
11383 
11384 	/*
11385 	 * intel_mode_valid() should be
11386 	 * sufficient on older platforms.
11387 	 */
11388 	if (DISPLAY_VER(dev_priv) < 9)
11389 		return MODE_OK;
11390 
11391 	/*
11392 	 * Most people will probably want a fullscreen
11393 	 * plane so let's not advertize modes that are
11394 	 * too big for that.
11395 	 */
11396 	if (DISPLAY_VER(dev_priv) >= 11) {
11397 		plane_width_max = 5120 << bigjoiner;
11398 		plane_height_max = 4320;
11399 	} else {
11400 		plane_width_max = 5120;
11401 		plane_height_max = 4096;
11402 	}
11403 
11404 	if (mode->hdisplay > plane_width_max)
11405 		return MODE_H_ILLEGAL;
11406 
11407 	if (mode->vdisplay > plane_height_max)
11408 		return MODE_V_ILLEGAL;
11409 
11410 	return MODE_OK;
11411 }
11412 
/* Device-level mode config callbacks: fb creation and atomic entry points. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
11424 
11425 /**
11426  * intel_init_display_hooks - initialize the display modesetting hooks
11427  * @dev_priv: device private
11428  */
11429 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11430 {
11431 	intel_init_cdclk_hooks(dev_priv);
11432 	intel_init_audio_hooks(dev_priv);
11433 
11434 	intel_dpll_init_clock_hook(dev_priv);
11435 
11436 	if (DISPLAY_VER(dev_priv) >= 9) {
11437 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11438 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11439 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11440 	} else if (HAS_DDI(dev_priv)) {
11441 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11442 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11443 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11444 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11445 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11446 		dev_priv->display.crtc_enable = ilk_crtc_enable;
11447 		dev_priv->display.crtc_disable = ilk_crtc_disable;
11448 	} else if (IS_CHERRYVIEW(dev_priv) ||
11449 		   IS_VALLEYVIEW(dev_priv)) {
11450 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11451 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
11452 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11453 	} else {
11454 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11455 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
11456 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11457 	}
11458 
11459 	intel_fdi_init_hook(dev_priv);
11460 
11461 	if (DISPLAY_VER(dev_priv) >= 9) {
11462 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11463 		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11464 	} else {
11465 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11466 		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11467 	}
11468 
11469 }
11470 
11471 void intel_modeset_init_hw(struct drm_i915_private *i915)
11472 {
11473 	struct intel_cdclk_state *cdclk_state =
11474 		to_intel_cdclk_state(i915->cdclk.obj.state);
11475 
11476 	intel_update_cdclk(i915);
11477 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11478 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11479 }
11480 
11481 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11482 {
11483 	struct drm_plane *plane;
11484 	struct intel_crtc *crtc;
11485 
11486 	for_each_intel_crtc(state->dev, crtc) {
11487 		struct intel_crtc_state *crtc_state;
11488 
11489 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
11490 		if (IS_ERR(crtc_state))
11491 			return PTR_ERR(crtc_state);
11492 
11493 		if (crtc_state->hw.active) {
11494 			/*
11495 			 * Preserve the inherited flag to avoid
11496 			 * taking the full modeset path.
11497 			 */
11498 			crtc_state->inherited = true;
11499 		}
11500 	}
11501 
11502 	drm_for_each_plane(plane, state->dev) {
11503 		struct drm_plane_state *plane_state;
11504 
11505 		plane_state = drm_atomic_get_plane_state(state, plane);
11506 		if (IS_ERR(plane_state))
11507 			return PTR_ERR(plane_state);
11508 	}
11509 
11510 	return 0;
11511 }
11512 
11513 /*
11514  * Calculate what we think the watermarks should be for the state we've read
11515  * out of the hardware and then immediately program those watermarks so that
11516  * we ensure the hardware settings match our internal state.
11517  *
11518  * We can calculate what we think WM's should be by creating a duplicate of the
11519  * current state (which was constructed during hardware readout) and running it
11520  * through the atomic check code to calculate new watermark values in the
11521  * state object.
11522  */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		/* Mirror the sanitized wm into the readout (current) state. */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* Deadlock on a modeset lock: drop everything and start over. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
11598 
11599 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
11600 {
11601 	if (IS_IRONLAKE(dev_priv)) {
11602 		u32 fdi_pll_clk =
11603 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
11604 
11605 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
11606 	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
11607 		dev_priv->fdi_pll_freq = 270000;
11608 	} else {
11609 		return;
11610 	}
11611 
11612 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
11613 }
11614 
/*
 * Commit the state read out from hardware once at init time, so that all
 * active planes have fully computed states before the first userspace
 * commit. Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/* Encoders that can't fastset need their connectors too. */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Deadlock on a modeset lock: drop everything and retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
11693 
11694 static void intel_mode_config_init(struct drm_i915_private *i915)
11695 {
11696 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
11697 
11698 	drm_mode_config_init(&i915->drm);
11699 	INIT_LIST_HEAD(&i915->global_obj_list);
11700 
11701 	mode_config->min_width = 0;
11702 	mode_config->min_height = 0;
11703 
11704 	mode_config->preferred_depth = 24;
11705 	mode_config->prefer_shadow = 1;
11706 
11707 	mode_config->allow_fb_modifiers = true;
11708 
11709 	mode_config->funcs = &intel_mode_funcs;
11710 
11711 	mode_config->async_page_flip = has_async_flips(i915);
11712 
11713 	/*
11714 	 * Maximum framebuffer dimensions, chosen to match
11715 	 * the maximum render engine surface size on gen4+.
11716 	 */
11717 	if (DISPLAY_VER(i915) >= 7) {
11718 		mode_config->max_width = 16384;
11719 		mode_config->max_height = 16384;
11720 	} else if (DISPLAY_VER(i915) >= 4) {
11721 		mode_config->max_width = 8192;
11722 		mode_config->max_height = 8192;
11723 	} else if (IS_DISPLAY_VER(i915, 3)) {
11724 		mode_config->max_width = 4096;
11725 		mode_config->max_height = 4096;
11726 	} else {
11727 		mode_config->max_width = 2048;
11728 		mode_config->max_height = 2048;
11729 	}
11730 
11731 	if (IS_I845G(i915) || IS_I865G(i915)) {
11732 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
11733 		mode_config->cursor_height = 1023;
11734 	} else if (IS_I830(i915) || IS_I85X(i915) ||
11735 		   IS_I915G(i915) || IS_I915GM(i915)) {
11736 		mode_config->cursor_width = 64;
11737 		mode_config->cursor_height = 64;
11738 	} else {
11739 		mode_config->cursor_width = 256;
11740 		mode_config->cursor_height = 256;
11741 	}
11742 }
11743 
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	/* Global atomic objects must go before the core mode config teardown. */
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
11749 
11750 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
11751 {
11752 	if (plane_config->fb) {
11753 		struct drm_framebuffer *fb = &plane_config->fb->base;
11754 
11755 		/* We may only have the stub and not a full framebuffer */
11756 		if (drm_framebuffer_read_refcount(fb))
11757 			drm_framebuffer_put(fb);
11758 		else
11759 			kfree(fb);
11760 	}
11761 
11762 	if (plane_config->vma)
11763 		i915_vma_put(plane_config->vma);
11764 }
11765 
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	/* NOTE(review): workqueue allocation failures are not checked here */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	intel_mode_config_init(i915);

	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_csr;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_csr:
	/* Undo in reverse order of the setup above. */
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
11831 
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	if (HAS_DISPLAY(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read the current hardware state into our software tracking. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);

		plane_config_fini(&plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
11924 
11925 /* part #3: call after gem init */
11926 int intel_modeset_init(struct drm_i915_private *i915)
11927 {
11928 	int ret;
11929 
11930 	if (!HAS_DISPLAY(i915))
11931 		return 0;
11932 
11933 	/*
11934 	 * Force all active planes to recompute their states. So that on
11935 	 * mode_setcrtc after probe, all the intel_plane_state variables
11936 	 * are already calculated and there is no assert_plane warnings
11937 	 * during bootup.
11938 	 */
11939 	ret = intel_initial_commit(&i915->drm);
11940 	if (ret)
11941 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
11942 
11943 	intel_overlay_setup(i915);
11944 
11945 	ret = intel_fbdev_init(&i915->drm);
11946 	if (ret)
11947 		return ret;
11948 
11949 	/* Only enable hotplug handling once the fbdev is fully set up. */
11950 	intel_hpd_init(i915);
11951 	intel_hpd_poll_disable(i915);
11952 
11953 	intel_init_ipc(i915);
11954 
11955 	return 0;
11956 }
11957 
/*
 * Force-enable a pipe on i830 with a hardcoded 640x480@60 VGA timing
 * (quirk: some i830 systems require a pipe to always be running).
 * The DPLL programming sequence below is order-sensitive.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check the divider choice against the 48 MHz refclk. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Standard VESA 640x480@60 horizontal/vertical timings. */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
12030 
/*
 * Force-disable a pipe that was enabled via the i830 force-pipe quirk
 * (see i830_enable_pipe()). Expects all planes/cursors to be off already;
 * disables the pipe first, then shuts down its DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* Sanity check: no plane or cursor may still be scanning out. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Wait until the scanline counter stops before touching the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	/* Disable the DPLL, keeping VGA mode disabled. */
	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
12060 
12061 static void
12062 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12063 {
12064 	struct intel_crtc *crtc;
12065 
12066 	if (DISPLAY_VER(dev_priv) >= 4)
12067 		return;
12068 
12069 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12070 		struct intel_plane *plane =
12071 			to_intel_plane(crtc->base.primary);
12072 		struct intel_crtc *plane_crtc;
12073 		enum pipe pipe;
12074 
12075 		if (!plane->get_hw_state(plane, &pipe))
12076 			continue;
12077 
12078 		if (pipe == crtc->pipe)
12079 			continue;
12080 
12081 		drm_dbg_kms(&dev_priv->drm,
12082 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12083 			    plane->base.base.id, plane->base.name);
12084 
12085 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12086 		intel_plane_disable_noatomic(plane_crtc, plane);
12087 	}
12088 }
12089 
12090 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12091 {
12092 	struct drm_device *dev = crtc->base.dev;
12093 	struct intel_encoder *encoder;
12094 
12095 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12096 		return true;
12097 
12098 	return false;
12099 }
12100 
12101 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12102 {
12103 	struct drm_device *dev = encoder->base.dev;
12104 	struct intel_connector *connector;
12105 
12106 	for_each_connector_on_encoder(dev, &encoder->base, connector)
12107 		return connector;
12108 
12109 	return NULL;
12110 }
12111 
12112 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
12113 			      enum pipe pch_transcoder)
12114 {
12115 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12116 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12117 }
12118 
/*
 * Overwrite whatever frame start delay the BIOS left behind with the
 * driver's own value (dev_priv->framestart_delay), for both the CPU
 * transcoder and, when a PCH encoder is in use, the PCH transcoder.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW/BDW/gen9+: delay lives in CHICKEN_TRANS */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders are skipped entirely here — presumably
		 * CHICKEN_TRANS doesn't apply to them; confirm before
		 * changing. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		/* register field is 0-based, framestart_delay is 1-based */
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* older platforms: delay lives in PIPECONF */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		/* IBX: delay lives in the PCH transcoder config register */
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT+: delay lives in TRANS_CHICKEN2 */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
12169 
/*
 * Bring a single crtc into a state the driver can take over from:
 * clear BIOS frame start delays, disable all non-primary planes,
 * disable the crtc entirely if nothing is connected to it, and set up
 * the FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. Bigjoiner slaves never have
	 * their own encoder, so don't shut them down for lacking one. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
12236 
12237 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12238 {
12239 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12240 
12241 	/*
12242 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
12243 	 * the hardware when a high res displays plugged in. DPLL P
12244 	 * divider is zero, and the pipe timings are bonkers. We'll
12245 	 * try to disable everything in that case.
12246 	 *
12247 	 * FIXME would be nice to be able to sanitize this state
12248 	 * without several WARNs, but for now let's take the easy
12249 	 * road.
12250 	 */
12251 	return IS_SANDYBRIDGE(dev_priv) &&
12252 		crtc_state->hw.active &&
12253 		crtc_state->shared_dpll &&
12254 		crtc_state->port_clock == 0;
12255 }
12256 
/*
 * Fix up an encoder whose hw state is inconsistent with its connectors:
 * an encoder with active connectors but no active pipe is manually
 * disabled, and the software links (crtc, dpms, encoder pointers) are
 * clamped to off. Finally the (possibly updated) state is reported to
 * opregion and, on DDI platforms, the PLL mapping is sanitized.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a crtc with a known-bogus SNB BIOS DPLL config as inactive
	 * so we tear the whole thing down below. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the original best_encoder */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
12327 
/* FIXME read out full plane state for all planes */
/*
 * Read the hw state (enabled + owning pipe) of every plane, record the
 * visibility in the state of the crtc that owns the plane in hardware,
 * then recompute the per-crtc plane bitmasks from the result.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* default in case ->get_hw_state() leaves it untouched */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* use the crtc the plane is attached to in hardware */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* rebuild the plane bitmasks from the readout above */
		fixup_plane_bitmasks(crtc_state);
	}
}
12361 
/*
 * Read the full display hw state (crtcs, planes, encoders, DPLLs,
 * connectors) into the corresponding software state objects, and derive
 * the dependent state (active pipes, cdclk/voltage requirements,
 * bandwidth) from it. Runs with all modeset locks held during driver
 * load/resume; the state read here is sanitized afterwards.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* start from a clean crtc state before reading out */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	/* keep the three copies of the active pipe mask in sync */
	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);
			if (encoder->sync_state)
				encoder->sync_state(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* the encoder should be linked to the bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* derive cdclk/bandwidth requirements from the state read above */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		/* slaves are handled via their master below */
		if (crtc_state->bigjoiner_slave)
			continue;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);

		/* discard our incomplete slave state, copy it from master */
		if (crtc_state->bigjoiner && crtc_state->hw.active) {
			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
			struct intel_crtc_state *slave_crtc_state =
				to_intel_crtc_state(slave->base.state);

			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
			slave->base.mode = crtc->base.mode;

			/* the slave mirrors the master's cdclk/voltage needs */
			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
			cdclk_state->min_voltage_level[slave->pipe] =
				crtc_state->min_voltage_level;

			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
				const struct intel_plane_state *plane_state =
					to_intel_plane_state(plane->base.state);

				/*
				 * FIXME don't have the fb yet, so can't
				 * use intel_plane_data_rate() :(
				 */
				if (plane_state->uapi.visible)
					crtc_state->data_rate[plane->id] =
						4 * crtc_state->pixel_rate;
				else
					crtc_state->data_rate[plane->id] = 0;
			}

			intel_bw_crtc_update(bw_state, slave_crtc_state);
			drm_calc_timestamping_constants(&slave->base,
							&slave_crtc_state->hw.adjusted_mode);
		}
	}
}
12582 
12583 static void
12584 get_encoder_power_domains(struct drm_i915_private *dev_priv)
12585 {
12586 	struct intel_encoder *encoder;
12587 
12588 	for_each_intel_encoder(&dev_priv->drm, encoder) {
12589 		struct intel_crtc_state *crtc_state;
12590 
12591 		if (!encoder->get_power_domains)
12592 			continue;
12593 
12594 		/*
12595 		 * MST-primary and inactive encoders don't have a crtc state
12596 		 * and neither of these require any power domain references.
12597 		 */
12598 		if (!encoder->base.crtc)
12599 			continue;
12600 
12601 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
12602 		encoder->get_power_domains(encoder, crtc_state);
12603 	}
12604 }
12605 
/*
 * Apply display workarounds ("WAs") that must be in place before the
 * hw state readout and sanitization start touching the hardware.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_RANGE(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
12634 
12635 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
12636 				       enum port port, i915_reg_t hdmi_reg)
12637 {
12638 	u32 val = intel_de_read(dev_priv, hdmi_reg);
12639 
12640 	if (val & SDVO_ENABLE ||
12641 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
12642 		return;
12643 
12644 	drm_dbg_kms(&dev_priv->drm,
12645 		    "Sanitizing transcoder select for HDMI %c\n",
12646 		    port_name(port));
12647 
12648 	val &= ~SDVO_PIPE_SEL_MASK;
12649 	val |= SDVO_PIPE_SEL(PIPE_A);
12650 
12651 	intel_de_write(dev_priv, hdmi_reg, val);
12652 }
12653 
12654 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
12655 				     enum port port, i915_reg_t dp_reg)
12656 {
12657 	u32 val = intel_de_read(dev_priv, dp_reg);
12658 
12659 	if (val & DP_PORT_EN ||
12660 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
12661 		return;
12662 
12663 	drm_dbg_kms(&dev_priv->drm,
12664 		    "Sanitizing transcoder select for DP %c\n",
12665 		    port_name(port));
12666 
12667 	val &= ~DP_PIPE_SEL_MASK;
12668 	val |= DP_PIPE_SEL(PIPE_A);
12669 
12670 	intel_de_write(dev_priv, dp_reg, val);
12671 }
12672 
/* Sanitize the transcoder select bits of all disabled IBX PCH ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
12695 
/*
 * Scan out the current hw modeset state and sanitize it to a state the
 * driver can take over cleanly. Called at driver load and resume with
 * all modeset locks held (via @ctx).
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* keep everything powered up for the duration of the readout */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* read out (and where applicable sanitize) the watermark state */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/* after sanitization no crtc should still hold domains */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
12785 
/*
 * Restore the atomic display state saved at suspend time (if any),
 * retrying the modeset locking on -EDEADLK until all locks are held.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* take ownership of the saved state and point it at our ctx */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* standard deadlock-backoff locking dance */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	/* drop our reference on the saved state */
	if (state)
		drm_atomic_state_put(state);
}
12820 
/*
 * Cancel and flush all per-connector work (modeset retries, HDCP
 * check/property work) that may have been queued by hotplug handling.
 */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* only cancel work that was ever initialized */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12838 
/* part #1: call before irq uninstall */
/*
 * Drain all pending display work (flips, modesets, deferred atomic
 * cleanup) while interrupts are still installed.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* all deferred atomic cleanup must be done by now */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
12848 
/* part #2: call after irq uninstall */
/*
 * Tear down the display subsystem once interrupts are gone. The
 * ordering below matters: hpd/poll work is stopped before fbdev,
 * fbdev before the mode config, etc.
 */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* safe to destroy now that everything above has been flushed */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
12888 
/* part #3: call after gem init */
/*
 * Final display teardown steps that must run after GEM teardown:
 * CSR firmware, power domains, VGA client and VBT resources.
 */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
12900 
/*
 * Register the display side of the driver with userspace-facing
 * services (debugfs, opregion, ACPI video, audio, fbdev, poll helper).
 * No-op on GPUs without display.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
12931 
/*
 * Undo intel_display_driver_register(): tear down fbdev, audio,
 * connector polling and the ACPI/opregion hooks, in roughly reverse
 * order of registration. No-op when the device has no display.
 */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	/* Disable all CRTCs before the KMS objects go away. */
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}
12951 
12952 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
12953 
/*
 * Snapshot of key display register values, filled in by
 * intel_display_capture_error_state() and formatted by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_CTL2, captured on HSW/BDW only */
	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		/* NOTE(review): never written or printed in this file — possibly dead */
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false => pipe powered down; registers below not read */
		bool power_domain_on;
		u32 source;
		u32 stat;	/* PIPESTAT, GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;	/* DISPLAY_VER <= 3 only */
		u32 pos;	/* DISPLAY_VER <= 3 only */
		u32 addr;	/* DISPLAY_VER <= 7, non-HSW only */
		u32 surface;	/* DISPLAY_VER >= 4 only */
		u32 tile_offset;	/* DISPLAY_VER >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;	/* transcoder exists on this platform */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	/*
	 * Size must match the transcoders[] table in
	 * intel_display_capture_error_state(); enforced by BUILD_BUG_ON there.
	 */
	} transcoder[5];
};
12996 
12997 struct intel_display_error_state *
12998 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
12999 {
13000 	struct intel_display_error_state *error;
13001 	int transcoders[] = {
13002 		TRANSCODER_A,
13003 		TRANSCODER_B,
13004 		TRANSCODER_C,
13005 		TRANSCODER_D,
13006 		TRANSCODER_EDP,
13007 	};
13008 	int i;
13009 
13010 	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
13011 
13012 	if (!HAS_DISPLAY(dev_priv))
13013 		return NULL;
13014 
13015 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
13016 	if (error == NULL)
13017 		return NULL;
13018 
13019 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
13020 		error->power_well_driver = intel_de_read(dev_priv,
13021 							 HSW_PWR_WELL_CTL2);
13022 
13023 	for_each_pipe(dev_priv, i) {
13024 		error->pipe[i].power_domain_on =
13025 			__intel_display_power_is_enabled(dev_priv,
13026 							 POWER_DOMAIN_PIPE(i));
13027 		if (!error->pipe[i].power_domain_on)
13028 			continue;
13029 
13030 		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
13031 		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
13032 		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
13033 
13034 		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
13035 		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
13036 		if (DISPLAY_VER(dev_priv) <= 3) {
13037 			error->plane[i].size = intel_de_read(dev_priv,
13038 							     DSPSIZE(i));
13039 			error->plane[i].pos = intel_de_read(dev_priv,
13040 							    DSPPOS(i));
13041 		}
13042 		if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
13043 			error->plane[i].addr = intel_de_read(dev_priv,
13044 							     DSPADDR(i));
13045 		if (DISPLAY_VER(dev_priv) >= 4) {
13046 			error->plane[i].surface = intel_de_read(dev_priv,
13047 								DSPSURF(i));
13048 			error->plane[i].tile_offset = intel_de_read(dev_priv,
13049 								    DSPTILEOFF(i));
13050 		}
13051 
13052 		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
13053 
13054 		if (HAS_GMCH(dev_priv))
13055 			error->pipe[i].stat = intel_de_read(dev_priv,
13056 							    PIPESTAT(i));
13057 	}
13058 
13059 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
13060 		enum transcoder cpu_transcoder = transcoders[i];
13061 
13062 		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
13063 			continue;
13064 
13065 		error->transcoder[i].available = true;
13066 		error->transcoder[i].power_domain_on =
13067 			__intel_display_power_is_enabled(dev_priv,
13068 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13069 		if (!error->transcoder[i].power_domain_on)
13070 			continue;
13071 
13072 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
13073 
13074 		error->transcoder[i].conf = intel_de_read(dev_priv,
13075 							  PIPECONF(cpu_transcoder));
13076 		error->transcoder[i].htotal = intel_de_read(dev_priv,
13077 							    HTOTAL(cpu_transcoder));
13078 		error->transcoder[i].hblank = intel_de_read(dev_priv,
13079 							    HBLANK(cpu_transcoder));
13080 		error->transcoder[i].hsync = intel_de_read(dev_priv,
13081 							   HSYNC(cpu_transcoder));
13082 		error->transcoder[i].vtotal = intel_de_read(dev_priv,
13083 							    VTOTAL(cpu_transcoder));
13084 		error->transcoder[i].vblank = intel_de_read(dev_priv,
13085 							    VBLANK(cpu_transcoder));
13086 		error->transcoder[i].vsync = intel_de_read(dev_priv,
13087 							   VSYNC(cpu_transcoder));
13088 	}
13089 
13090 	return error;
13091 }
13092 
/* Shorthand for formatting into the error state buffer @e. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
13094 
/*
 * Format a previously captured display error state into the error
 * state buffer @m. A NULL @error (nothing was captured) is a no-op.
 * Per-pipe and per-transcoder register values are only printed when
 * they were actually read, i.e. the corresponding power domain was on
 * at capture time.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Platform gating mirrors the capture side. */
		if (DISPLAY_VER(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (DISPLAY_VER(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		/* Transcoder not present on this platform at capture time. */
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
13153 
13154 #endif
13155