1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/dp/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46 
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_drrs.h"
56 #include "display/intel_dsi.h"
57 #include "display/intel_dvo.h"
58 #include "display/intel_fb.h"
59 #include "display/intel_gmbus.h"
60 #include "display/intel_hdmi.h"
61 #include "display/intel_lvds.h"
62 #include "display/intel_sdvo.h"
63 #include "display/intel_snps_phy.h"
64 #include "display/intel_tv.h"
65 #include "display/intel_vdsc.h"
66 #include "display/intel_vrr.h"
67 
68 #include "gem/i915_gem_lmem.h"
69 #include "gem/i915_gem_object.h"
70 
71 #include "gt/gen8_ppgtt.h"
72 
73 #include "g4x_dp.h"
74 #include "g4x_hdmi.h"
75 #include "i915_drv.h"
76 #include "icl_dsi.h"
77 #include "intel_acpi.h"
78 #include "intel_atomic.h"
79 #include "intel_atomic_plane.h"
80 #include "intel_bw.h"
81 #include "intel_cdclk.h"
82 #include "intel_color.h"
83 #include "intel_crtc.h"
84 #include "intel_de.h"
85 #include "intel_display_types.h"
86 #include "intel_dmc.h"
87 #include "intel_dp_link_training.h"
88 #include "intel_dpt.h"
89 #include "intel_fbc.h"
90 #include "intel_fbdev.h"
91 #include "intel_fdi.h"
92 #include "intel_fifo_underrun.h"
93 #include "intel_frontbuffer.h"
94 #include "intel_hdcp.h"
95 #include "intel_hotplug.h"
96 #include "intel_overlay.h"
97 #include "intel_panel.h"
98 #include "intel_pch_display.h"
99 #include "intel_pch_refclk.h"
100 #include "intel_pcode.h"
101 #include "intel_pipe_crc.h"
102 #include "intel_plane_initial.h"
103 #include "intel_pm.h"
104 #include "intel_pps.h"
105 #include "intel_psr.h"
106 #include "intel_quirks.h"
107 #include "intel_sprite.h"
108 #include "intel_tc.h"
109 #include "intel_vga.h"
110 #include "i9xx_plane.h"
111 #include "skl_scaler.h"
112 #include "skl_universal_plane.h"
113 #include "vlv_dsi_pll.h"
114 #include "vlv_sideband.h"
115 #include "vlv_dsi.h"
116 
117 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
118 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
119 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
120 					 const struct intel_link_m_n *m_n,
121 					 const struct intel_link_m_n *m2_n2);
122 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
123 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
124 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
125 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
126 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
127 static void intel_modeset_setup_hw_state(struct drm_device *dev,
128 					 struct drm_modeset_acquire_ctx *ctx);
129 
130 /**
131  * intel_update_watermarks - update FIFO watermark values based on current modes
132  * @dev_priv: i915 device
133  *
134  * Calculate watermark values for the various WM regs based on current mode
135  * and plane configuration.
136  *
137  * There are several cases to deal with here:
138  *   - normal (i.e. non-self-refresh)
139  *   - self-refresh (SR) mode
140  *   - lines are large relative to FIFO size (buffer can hold up to 2)
141  *   - lines are small relative to FIFO size (buffer can hold more than 2
142  *     lines), so need to account for TLB latency
143  *
144  *   The normal calculation is:
145  *     watermark = dotclock * bytes per pixel * latency
146  *   where latency is platform & configuration dependent (we assume pessimal
147  *   values here).
148  *
149  *   The SR calculation is:
150  *     watermark = (trunc(latency/line time)+1) * surface width *
151  *       bytes per pixel
152  *   where
153  *     line time = htotal / dotclock
154  *     surface width = hdisplay for normal plane and 64 for cursor
155  *   and latency is assumed to be high, as above.
156  *
157  * The final value programmed to the register should always be rounded up,
158  * and include an extra 2 entries to account for clock crossings.
159  *
160  * We don't use the sprite, so we can ignore that.  And on Crestline we have
161  * to set the non-SR watermarks to 8.
162  */
163 static void intel_update_watermarks(struct drm_i915_private *dev_priv)
164 {
165 	if (dev_priv->wm_disp->update_wm)
166 		dev_priv->wm_disp->update_wm(dev_priv);
167 }
168 
169 static int intel_compute_pipe_wm(struct intel_atomic_state *state,
170 				 struct intel_crtc *crtc)
171 {
172 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
173 	if (dev_priv->wm_disp->compute_pipe_wm)
174 		return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
175 	return 0;
176 }
177 
178 static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
179 					 struct intel_crtc *crtc)
180 {
181 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
182 	if (!dev_priv->wm_disp->compute_intermediate_wm)
183 		return 0;
184 	if (drm_WARN_ON(&dev_priv->drm,
185 			!dev_priv->wm_disp->compute_pipe_wm))
186 		return 0;
187 	return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
188 }
189 
190 static bool intel_initial_watermarks(struct intel_atomic_state *state,
191 				     struct intel_crtc *crtc)
192 {
193 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
194 	if (dev_priv->wm_disp->initial_watermarks) {
195 		dev_priv->wm_disp->initial_watermarks(state, crtc);
196 		return true;
197 	}
198 	return false;
199 }
200 
201 static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
202 					   struct intel_crtc *crtc)
203 {
204 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
205 	if (dev_priv->wm_disp->atomic_update_watermarks)
206 		dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
207 }
208 
209 static void intel_optimize_watermarks(struct intel_atomic_state *state,
210 				      struct intel_crtc *crtc)
211 {
212 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
213 	if (dev_priv->wm_disp->optimize_watermarks)
214 		dev_priv->wm_disp->optimize_watermarks(state, crtc);
215 }
216 
217 static int intel_compute_global_watermarks(struct intel_atomic_state *state)
218 {
219 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
220 	if (dev_priv->wm_disp->compute_global_watermarks)
221 		return dev_priv->wm_disp->compute_global_watermarks(state);
222 	return 0;
223 }
224 
225 /* returns HPLL frequency in kHz */
226 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
227 {
228 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
229 
230 	/* Obtain SKU information */
231 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
232 		CCK_FUSE_HPLL_FREQ_MASK;
233 
234 	return vco_freq[hpll_freq] * 1000;
235 }
236 
237 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
238 		      const char *name, u32 reg, int ref_freq)
239 {
240 	u32 val;
241 	int divider;
242 
243 	val = vlv_cck_read(dev_priv, reg);
244 	divider = val & CCK_FREQUENCY_VALUES;
245 
246 	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
247 		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
248 		 "%s change in progress\n", name);
249 
250 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
251 }
252 
253 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
254 			   const char *name, u32 reg)
255 {
256 	int hpll;
257 
258 	vlv_cck_get(dev_priv);
259 
260 	if (dev_priv->hpll_freq == 0)
261 		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
262 
263 	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
264 
265 	vlv_cck_put(dev_priv);
266 
267 	return hpll;
268 }
269 
270 static void intel_update_czclk(struct drm_i915_private *dev_priv)
271 {
272 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
273 		return;
274 
275 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
276 						      CCK_CZ_CLOCK_CONTROL);
277 
278 	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
279 		dev_priv->czclk_freq);
280 }
281 
282 static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
283 {
284 	return (crtc_state->active_planes &
285 		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
286 }
287 
288 /* WA Display #0827: Gen9:all */
289 static void
290 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
291 {
292 	if (enable)
293 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
294 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
295 	else
296 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
297 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
298 }
299 
300 /* Wa_2006604312:icl,ehl */
301 static void
302 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
303 		       bool enable)
304 {
305 	if (enable)
306 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
307 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
308 	else
309 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
310 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
311 }
312 
/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	/* Set or clear the cursor clock gating disable bit for @pipe */
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}
321 
/* A port sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
327 
/* A port sync master has at least one slave transcoder in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
333 
/* True if the crtc participates in transcoder port sync, as master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
340 
341 static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
342 {
343 	if (crtc_state->bigjoiner_slave)
344 		return crtc_state->bigjoiner_linked_crtc;
345 	else
346 		return to_intel_crtc(crtc_state->uapi.crtc);
347 }
348 
349 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
350 				    enum pipe pipe)
351 {
352 	i915_reg_t reg = PIPEDSL(pipe);
353 	u32 line1, line2;
354 	u32 line_mask;
355 
356 	if (DISPLAY_VER(dev_priv) == 2)
357 		line_mask = DSL_LINEMASK_GEN2;
358 	else
359 		line_mask = DSL_LINEMASK_GEN3;
360 
361 	line1 = intel_de_read(dev_priv, reg) & line_mask;
362 	msleep(5);
363 	line2 = intel_de_read(dev_priv, reg) & line_mask;
364 
365 	return line1 != line2;
366 }
367 
368 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
369 {
370 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
371 	enum pipe pipe = crtc->pipe;
372 
373 	/* Wait for the display line to settle/start moving */
374 	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
375 		drm_err(&dev_priv->drm,
376 			"pipe %c scanline %s wait timed out\n",
377 			pipe_name(pipe), onoff(state));
378 }
379 
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
384 
/* Wait until the pipe's scanline counter has started advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
389 
390 static void
391 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
392 {
393 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
394 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
395 
396 	if (DISPLAY_VER(dev_priv) >= 4) {
397 		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
398 		i915_reg_t reg = PIPECONF(cpu_transcoder);
399 
400 		/* Wait for the Pipe State to go off */
401 		if (intel_de_wait_for_clear(dev_priv, reg,
402 					    I965_PIPECONF_ACTIVE, 100))
403 			drm_WARN(&dev_priv->drm, 1,
404 				 "pipe_off wait timed out\n");
405 	} else {
406 		intel_wait_for_pipe_scanline_stopped(crtc);
407 	}
408 }
409 
/*
 * Warn if the CPU transcoder's enable bit does not match the expected
 * @state. Reads PIPECONF only while holding a power reference for the
 * transcoder's power domain; if the domain is off the transcoder is
 * treated as disabled.
 */
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* power domain off -> register inaccessible, assume disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
437 
438 static void assert_plane(struct intel_plane *plane, bool state)
439 {
440 	enum pipe pipe;
441 	bool cur_state;
442 
443 	cur_state = plane->get_hw_state(plane, &pipe);
444 
445 	I915_STATE_WARN(cur_state != state,
446 			"%s assertion failure (expected %s, current %s)\n",
447 			plane->base.name, onoff(state), onoff(cur_state));
448 }
449 
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

/* Verify that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
461 
/*
 * Wait (up to 1s) for the PHY to report the port as ready, warning
 * on timeout. The ready bits live in DPLL(0) for ports B/C and in
 * DPIO_PHY_STATUS for port D.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
495 
496 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
497 {
498 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
499 
500 	if (HAS_PCH_LPT(dev_priv))
501 		return PIPE_A;
502 	else
503 		return crtc->pipe;
504 }
505 
/*
 * Enable the CPU transcoder/pipe for @new_crtc_state. Asserts the
 * required PLL/FDI preconditions, sets PIPECONF_ENABLE, and waits for
 * the scanline to start moving when the vblank counter can't be used.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	/* planes must be enabled only after the pipe itself */
	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
566 
/*
 * Disable the CPU transcoder/pipe for @old_crtc_state, clearing
 * PIPECONF_ENABLE (except on 830, which keeps both pipes running)
 * and waiting for the pipe to actually turn off.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* only wait if we actually cleared the enable bit */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
608 
609 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
610 {
611 	unsigned int size = 0;
612 	int i;
613 
614 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
615 		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
616 
617 	return size;
618 }
619 
620 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
621 {
622 	unsigned int size = 0;
623 	int i;
624 
625 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
626 		unsigned int plane_size;
627 
628 		if (rem_info->plane[i].linear)
629 			plane_size = rem_info->plane[i].size;
630 		else
631 			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
632 
633 		if (plane_size == 0)
634 			continue;
635 
636 		if (rem_info->plane_alignment)
637 			size = ALIGN(size, rem_info->plane_alignment);
638 
639 		size += plane_size;
640 	}
641 
642 	return size;
643 }
644 
645 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
646 {
647 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
648 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
649 
650 	return DISPLAY_VER(dev_priv) < 4 ||
651 		(plane->fbc &&
652 		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
653 }
654 
655 /*
656  * Convert the x/y offsets into a linear offset.
657  * Only valid with 0/180 degree rotation, which is fine since linear
658  * offset is only used with linear buffers on pre-hsw and tiled buffers
659  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
660  */
661 u32 intel_fb_xy_to_linear(int x, int y,
662 			  const struct intel_plane_state *state,
663 			  int color_plane)
664 {
665 	const struct drm_framebuffer *fb = state->hw.fb;
666 	unsigned int cpp = fb->format->cpp[color_plane];
667 	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
668 
669 	return y * pitch + x * cpp;
670 }
671 
672 /*
673  * Add the x/y offsets derived from fb->offsets[] to the user
674  * specified plane src x/y offsets. The resulting x/y offsets
675  * specify the start of scanout from the beginning of the gtt mapping.
676  */
677 void intel_add_fb_offsets(int *x, int *y,
678 			  const struct intel_plane_state *state,
679 			  int color_plane)
680 
681 {
682 	*x += state->view.color_plane[color_plane].x;
683 	*y += state->view.color_plane[color_plane].y;
684 }
685 
686 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
687 			      u32 pixel_format, u64 modifier)
688 {
689 	struct intel_crtc *crtc;
690 	struct intel_plane *plane;
691 
692 	if (!HAS_DISPLAY(dev_priv))
693 		return 0;
694 
695 	/*
696 	 * We assume the primary plane for pipe A has
697 	 * the highest stride limits of them all,
698 	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
699 	 */
700 	crtc = intel_get_first_crtc(dev_priv);
701 	if (!crtc)
702 		return 0;
703 
704 	plane = to_intel_plane(crtc->base.primary);
705 
706 	return plane->max_stride(plane, pixel_format, modifier,
707 				 DRM_MODE_ROTATE_0);
708 }
709 
710 static void
711 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
712 			struct intel_plane_state *plane_state,
713 			bool visible)
714 {
715 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
716 
717 	plane_state->uapi.visible = visible;
718 
719 	if (visible)
720 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
721 	else
722 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
723 }
724 
725 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
726 {
727 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
728 	struct drm_plane *plane;
729 
730 	/*
731 	 * Active_planes aliases if multiple "primary" or cursor planes
732 	 * have been used on the same (or wrong) pipe. plane_mask uses
733 	 * unique ids, hence we can use that to reconstruct active_planes.
734 	 */
735 	crtc_state->enabled_planes = 0;
736 	crtc_state->active_planes = 0;
737 
738 	drm_for_each_plane_mask(plane, &dev_priv->drm,
739 				crtc_state->uapi.plane_mask) {
740 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
741 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
742 	}
743 }
744 
/*
 * Disable @plane on @crtc outside of a full atomic commit, updating the
 * current software state (visibility, plane bitmasks, data rate and
 * min cdclk) to match. Takes care of the platform workarounds that
 * must accompany a plane disable (IPS, cxsr, gen2 underrun reporting).
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is tied to the primary plane */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
790 
/*
 * Compute the y offset (in lines) of the plane's main surface from the
 * start of its GTT fence region; the x component is discarded.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
801 
/*
 * Re-read the current hardware state and, if @state is non-NULL,
 * re-commit that duplicated atomic state. Returns 0 on success or a
 * negative error code; -EDEADLK is unexpected at this point and WARNs.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* nothing to re-commit */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
840 
841 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
842 {
843 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
844 		intel_has_gpu_reset(&dev_priv->gt));
845 }
846 
/*
 * Quiesce the display before a GPU reset. When the reset will clobber
 * display state (or the test parameter forces it), take all modeset
 * locks, duplicate the current atomic state for later restore by
 * intel_display_finish_reset(), and disable all crtcs.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* retry until all modeset locks are acquired without deadlock */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* stashed for intel_display_finish_reset() to restore */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
909 
/*
 * Counterpart to intel_display_prepare_reset(): restore the duplicated
 * atomic state after the GPU reset (re-initializing display hardware
 * first if the reset clobbered it) and drop the modeset locks.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
961 
962 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
963 {
964 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
965 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
966 	enum pipe pipe = crtc->pipe;
967 	u32 tmp;
968 
969 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
970 
971 	/*
972 	 * Display WA #1153: icl
973 	 * enable hardware to bypass the alpha math
974 	 * and rounding for per-pixel values 00 and 0xff
975 	 */
976 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
977 	/*
978 	 * Display WA # 1605353570: icl
979 	 * Set the pixel rounding bit to 1 for allowing
980 	 * passthrough of Frame buffer pixels unmodified
981 	 * across pipe
982 	 */
983 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
984 
985 	/*
986 	 * Underrun recovery must always be disabled on display 13+.
987 	 * DG2 chicken bit meaning is inverted compared to other platforms.
988 	 */
989 	if (IS_DG2(dev_priv))
990 		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
991 	else if (DISPLAY_VER(dev_priv) >= 13)
992 		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
993 
994 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
995 }
996 
997 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
998 {
999 	struct drm_crtc *crtc;
1000 	bool cleanup_done;
1001 
1002 	drm_for_each_crtc(crtc, &dev_priv->drm) {
1003 		struct drm_crtc_commit *commit;
1004 		spin_lock(&crtc->commit_lock);
1005 		commit = list_first_entry_or_null(&crtc->commit_list,
1006 						  struct drm_crtc_commit, commit_entry);
1007 		cleanup_done = commit ?
1008 			try_wait_for_completion(&commit->cleanup_done) : true;
1009 		spin_unlock(&crtc->commit_lock);
1010 
1011 		if (cleanup_done)
1012 			continue;
1013 
1014 		drm_crtc_wait_one_vblank(crtc);
1015 
1016 		return true;
1017 	}
1018 
1019 	return false;
1020 }
1021 
1022 /*
1023  * Finds the encoder associated with the given CRTC. This can only be
1024  * used when we know that the CRTC isn't feeding multiple encoders!
1025  */
1026 struct intel_encoder *
1027 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
1028 			   const struct intel_crtc_state *crtc_state)
1029 {
1030 	const struct drm_connector_state *connector_state;
1031 	const struct drm_connector *connector;
1032 	struct intel_encoder *encoder = NULL;
1033 	struct intel_crtc *master_crtc;
1034 	int num_encoders = 0;
1035 	int i;
1036 
1037 	master_crtc = intel_master_crtc(crtc_state);
1038 
1039 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
1040 		if (connector_state->crtc != &master_crtc->base)
1041 			continue;
1042 
1043 		encoder = to_intel_encoder(connector_state->best_encoder);
1044 		num_encoders++;
1045 	}
1046 
1047 	drm_WARN(encoder->base.dev, num_encoders != 1,
1048 		 "%d encoders for pipe %c\n",
1049 		 num_encoders, pipe_name(master_crtc->pipe));
1050 
1051 	return encoder;
1052 }
1053 
1054 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
1055 			       enum pipe pipe)
1056 {
1057 	i915_reg_t dslreg = PIPEDSL(pipe);
1058 	u32 temp;
1059 
1060 	temp = intel_de_read(dev_priv, dslreg);
1061 	udelay(500);
1062 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
1063 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
1064 			drm_err(&dev_priv->drm,
1065 				"mode set failed: pipe %c stuck\n",
1066 				pipe_name(pipe));
1067 	}
1068 }
1069 
1070 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
1071 {
1072 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1073 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1074 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
1075 	enum pipe pipe = crtc->pipe;
1076 	int width = drm_rect_width(dst);
1077 	int height = drm_rect_height(dst);
1078 	int x = dst->x1;
1079 	int y = dst->y1;
1080 
1081 	if (!crtc_state->pch_pfit.enabled)
1082 		return;
1083 
1084 	/* Force use of hard-coded filter coefficients
1085 	 * as some pre-programmed values are broken,
1086 	 * e.g. x201.
1087 	 */
1088 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
1089 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1090 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
1091 	else
1092 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
1093 			       PF_FILTER_MED_3x3);
1094 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
1095 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
1096 }
1097 
1098 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
1099 {
1100 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1101 	struct drm_device *dev = crtc->base.dev;
1102 	struct drm_i915_private *dev_priv = to_i915(dev);
1103 
1104 	if (!crtc_state->ips_enabled)
1105 		return;
1106 
1107 	/*
1108 	 * We can only enable IPS after we enable a plane and wait for a vblank
1109 	 * This function is called from post_plane_update, which is run after
1110 	 * a vblank wait.
1111 	 */
1112 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
1113 
1114 	if (IS_BROADWELL(dev_priv)) {
1115 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
1116 							 IPS_ENABLE | IPS_PCODE_CONTROL));
1117 		/* Quoting Art Runyan: "its not safe to expect any particular
1118 		 * value in IPS_CTL bit 31 after enabling IPS through the
1119 		 * mailbox." Moreover, the mailbox may return a bogus state,
1120 		 * so we need to just enable it and continue on.
1121 		 */
1122 	} else {
1123 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
1124 		/* The bit only becomes 1 in the next vblank, so this wait here
1125 		 * is essentially intel_wait_for_vblank. If we don't have this
1126 		 * and don't wait for vblanks until the end of crtc_enable, then
1127 		 * the HW state readout code will complain that the expected
1128 		 * IPS_CTL value is not the one we read. */
1129 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
1130 			drm_err(&dev_priv->drm,
1131 				"Timed out waiting for IPS enable\n");
1132 	}
1133 }
1134 
1135 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
1136 {
1137 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1138 	struct drm_device *dev = crtc->base.dev;
1139 	struct drm_i915_private *dev_priv = to_i915(dev);
1140 
1141 	if (!crtc_state->ips_enabled)
1142 		return;
1143 
1144 	if (IS_BROADWELL(dev_priv)) {
1145 		drm_WARN_ON(dev,
1146 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
1147 		/*
1148 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
1149 		 * 42ms timeout value leads to occasional timeouts so use 100ms
1150 		 * instead.
1151 		 */
1152 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
1153 			drm_err(&dev_priv->drm,
1154 				"Timed out waiting for IPS disable\n");
1155 	} else {
1156 		intel_de_write(dev_priv, IPS_CTL, 0);
1157 		intel_de_posting_read(dev_priv, IPS_CTL);
1158 	}
1159 
1160 	/* We need to wait for a vblank before we can disable the plane. */
1161 	intel_wait_for_vblank(dev_priv, crtc->pipe);
1162 }
1163 
1164 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
1165 {
1166 	if (crtc->overlay)
1167 		(void) intel_overlay_switch_off(crtc->overlay);
1168 
1169 	/* Let userspace switch the overlay on again. In most cases userspace
1170 	 * has to recompute where to put it anyway.
1171 	 */
1172 }
1173 
1174 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
1175 				       const struct intel_crtc_state *new_crtc_state)
1176 {
1177 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1178 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1179 
1180 	if (!old_crtc_state->ips_enabled)
1181 		return false;
1182 
1183 	if (intel_crtc_needs_modeset(new_crtc_state))
1184 		return true;
1185 
1186 	/*
1187 	 * Workaround : Do not read or write the pipe palette/gamma data while
1188 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1189 	 *
1190 	 * Disable IPS before we program the LUT.
1191 	 */
1192 	if (IS_HASWELL(dev_priv) &&
1193 	    (new_crtc_state->uapi.color_mgmt_changed ||
1194 	     new_crtc_state->update_pipe) &&
1195 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1196 		return true;
1197 
1198 	return !new_crtc_state->ips_enabled;
1199 }
1200 
1201 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
1202 				       const struct intel_crtc_state *new_crtc_state)
1203 {
1204 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1205 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1206 
1207 	if (!new_crtc_state->ips_enabled)
1208 		return false;
1209 
1210 	if (intel_crtc_needs_modeset(new_crtc_state))
1211 		return true;
1212 
1213 	/*
1214 	 * Workaround : Do not read or write the pipe palette/gamma data while
1215 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
1216 	 *
1217 	 * Re-enable IPS after the LUT has been programmed.
1218 	 */
1219 	if (IS_HASWELL(dev_priv) &&
1220 	    (new_crtc_state->uapi.color_mgmt_changed ||
1221 	     new_crtc_state->update_pipe) &&
1222 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
1223 		return true;
1224 
1225 	/*
1226 	 * We can't read out IPS on broadwell, assume the worst and
1227 	 * forcibly enable IPS on the first fastset.
1228 	 */
1229 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
1230 		return true;
1231 
1232 	return !old_crtc_state->ips_enabled;
1233 }
1234 
1235 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
1236 {
1237 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1238 
1239 	if (!crtc_state->nv12_planes)
1240 		return false;
1241 
1242 	/* WA Display #0827: Gen9:all */
1243 	if (DISPLAY_VER(dev_priv) == 9)
1244 		return true;
1245 
1246 	return false;
1247 }
1248 
1249 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
1250 {
1251 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1252 
1253 	/* Wa_2006604312:icl,ehl */
1254 	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
1255 		return true;
1256 
1257 	return false;
1258 }
1259 
1260 static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
1261 {
1262 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1263 
1264 	/* Wa_1604331009:icl,jsl,ehl */
1265 	if (is_hdr_mode(crtc_state) &&
1266 	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
1267 	    DISPLAY_VER(dev_priv) == 11)
1268 		return true;
1269 
1270 	return false;
1271 }
1272 
1273 static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
1274 				    enum pipe pipe, bool enable)
1275 {
1276 	if (DISPLAY_VER(i915) == 9) {
1277 		/*
1278 		 * "Plane N strech max must be programmed to 11b (x1)
1279 		 *  when Async flips are enabled on that plane."
1280 		 */
1281 		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1282 			     SKL_PLANE1_STRETCH_MAX_MASK,
1283 			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
1284 	} else {
1285 		/* Also needed on HSW/BDW albeit undocumented */
1286 		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
1287 			     HSW_PRI_STRETCH_MAX_MASK,
1288 			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
1289 	}
1290 }
1291 
1292 static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
1293 {
1294 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1295 
1296 	return crtc_state->uapi.async_flip && intel_vtd_active(i915) &&
1297 		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
1298 }
1299 
1300 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
1301 			    const struct intel_crtc_state *new_crtc_state)
1302 {
1303 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
1304 		new_crtc_state->active_planes;
1305 }
1306 
1307 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
1308 			     const struct intel_crtc_state *new_crtc_state)
1309 {
1310 	return old_crtc_state->active_planes &&
1311 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
1312 }
1313 
/*
 * Per-CRTC work to run after the plane update has been committed:
 * flush frontbuffer tracking, update watermarks, re-enable IPS,
 * FBC/DRRS bookkeeping, and drop hardware workarounds that the new
 * state no longer requires.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	/* IPS may only come back on after the planes are up (see hsw_enable_ips()). */
	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);
	intel_drrs_page_flip(state, crtc);

	/*
	 * Each workaround below is disabled only on an active->inactive
	 * transition; the matching enables happen in intel_pre_plane_update().
	 */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

}
1352 
1353 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
1354 					struct intel_crtc *crtc)
1355 {
1356 	const struct intel_crtc_state *crtc_state =
1357 		intel_atomic_get_new_crtc_state(state, crtc);
1358 	u8 update_planes = crtc_state->update_planes;
1359 	const struct intel_plane_state *plane_state;
1360 	struct intel_plane *plane;
1361 	int i;
1362 
1363 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1364 		if (plane->enable_flip_done &&
1365 		    plane->pipe == crtc->pipe &&
1366 		    update_planes & BIT(plane->id))
1367 			plane->enable_flip_done(plane);
1368 	}
1369 }
1370 
1371 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
1372 					 struct intel_crtc *crtc)
1373 {
1374 	const struct intel_crtc_state *crtc_state =
1375 		intel_atomic_get_new_crtc_state(state, crtc);
1376 	u8 update_planes = crtc_state->update_planes;
1377 	const struct intel_plane_state *plane_state;
1378 	struct intel_plane *plane;
1379 	int i;
1380 
1381 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1382 		if (plane->disable_flip_done &&
1383 		    plane->pipe == crtc->pipe &&
1384 		    update_planes & BIT(plane->id))
1385 			plane->disable_flip_done(plane);
1386 	}
1387 }
1388 
1389 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
1390 					     struct intel_crtc *crtc)
1391 {
1392 	struct drm_i915_private *i915 = to_i915(state->base.dev);
1393 	const struct intel_crtc_state *old_crtc_state =
1394 		intel_atomic_get_old_crtc_state(state, crtc);
1395 	const struct intel_crtc_state *new_crtc_state =
1396 		intel_atomic_get_new_crtc_state(state, crtc);
1397 	u8 update_planes = new_crtc_state->update_planes;
1398 	const struct intel_plane_state *old_plane_state;
1399 	struct intel_plane *plane;
1400 	bool need_vbl_wait = false;
1401 	int i;
1402 
1403 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1404 		if (plane->need_async_flip_disable_wa &&
1405 		    plane->pipe == crtc->pipe &&
1406 		    update_planes & BIT(plane->id)) {
1407 			/*
1408 			 * Apart from the async flip bit we want to
1409 			 * preserve the old state for the plane.
1410 			 */
1411 			plane->async_flip(plane, old_crtc_state,
1412 					  old_plane_state, false);
1413 			need_vbl_wait = true;
1414 		}
1415 	}
1416 
1417 	if (need_vbl_wait)
1418 		intel_wait_for_vblank(i915, crtc->pipe);
1419 }
1420 
/*
 * Per-CRTC work to run before the plane update is committed: disable
 * IPS/FBC/PSR as needed, enable hardware workarounds the new state
 * requires, program intermediate watermarks, and insert the vblank
 * waits various platforms need between these steps. The order of the
 * steps below matters; see the individual comments.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_pre_plane_update(state, crtc);

	/* IPS must be dropped before LUT programming / modeset (see helper). */
	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * Workarounds below are enabled on an inactive->active transition;
	 * the matching disables happen in intel_post_plane_update().
	 */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc)
;
}
1524 
1525 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
1526 				      struct intel_crtc *crtc)
1527 {
1528 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1529 	const struct intel_crtc_state *new_crtc_state =
1530 		intel_atomic_get_new_crtc_state(state, crtc);
1531 	unsigned int update_mask = new_crtc_state->update_planes;
1532 	const struct intel_plane_state *old_plane_state;
1533 	struct intel_plane *plane;
1534 	unsigned fb_bits = 0;
1535 	int i;
1536 
1537 	intel_crtc_dpms_overlay_disable(crtc);
1538 
1539 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
1540 		if (crtc->pipe != plane->pipe ||
1541 		    !(update_mask & BIT(plane->id)))
1542 			continue;
1543 
1544 		intel_plane_disable_arm(plane, new_crtc_state);
1545 
1546 		if (old_plane_state->uapi.visible)
1547 			fb_bits |= plane->frontbuffer_bit;
1548 	}
1549 
1550 	intel_frontbuffer_flip(dev_priv, fb_bits);
1551 }
1552 
1553 /*
1554  * intel_connector_primary_encoder - get the primary encoder for a connector
1555  * @connector: connector for which to return the encoder
1556  *
1557  * Returns the primary encoder for a connector. There is a 1:1 mapping from
1558  * all connectors to their encoder, except for DP-MST connectors which have
1559  * both a virtual and a primary encoder. These DP-MST primary encoders can be
1560  * pointed to by as many DP-MST connectors as there are pipes.
1561  */
1562 static struct intel_encoder *
1563 intel_connector_primary_encoder(struct intel_connector *connector)
1564 {
1565 	struct intel_encoder *encoder;
1566 
1567 	if (connector->mst_port)
1568 		return &dp_to_dig_port(connector->mst_port)->base;
1569 
1570 	encoder = intel_attached_encoder(connector);
1571 	drm_WARN_ON(connector->base.dev, !encoder);
1572 
1573 	return encoder;
1574 }
1575 
/*
 * Run the encoders' ->update_prepare() hooks for all connectors that are
 * part of a modeset, and carry the old shared-DPLL state over into
 * fastset crtc states first so those hooks see consistent DPLL data.
 */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			/* Only fastset crtcs keep their old DPLL assignment. */
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}

	/* The hooks themselves are only relevant for full modesets. */
	if (!state->modeset)
		return;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)
			continue;

		/* The connector may be getting disabled, i.e. have no crtc. */
		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
	}
}
1621 
1622 static void intel_encoders_update_complete(struct intel_atomic_state *state)
1623 {
1624 	struct drm_connector_state *new_conn_state;
1625 	struct drm_connector *connector;
1626 	int i;
1627 
1628 	if (!state->modeset)
1629 		return;
1630 
1631 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
1632 					i) {
1633 		struct intel_connector *intel_connector;
1634 		struct intel_encoder *encoder;
1635 		struct intel_crtc *crtc;
1636 
1637 		if (!intel_connector_needs_modeset(state, connector))
1638 			continue;
1639 
1640 		intel_connector = to_intel_connector(connector);
1641 		encoder = intel_connector_primary_encoder(intel_connector);
1642 		if (!encoder->update_complete)
1643 			continue;
1644 
1645 		crtc = new_conn_state->crtc ?
1646 			to_intel_crtc(new_conn_state->crtc) : NULL;
1647 		encoder->update_complete(state, encoder, crtc);
1648 	}
1649 }
1650 
1651 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
1652 					  struct intel_crtc *crtc)
1653 {
1654 	const struct intel_crtc_state *crtc_state =
1655 		intel_atomic_get_new_crtc_state(state, crtc);
1656 	const struct drm_connector_state *conn_state;
1657 	struct drm_connector *conn;
1658 	int i;
1659 
1660 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1661 		struct intel_encoder *encoder =
1662 			to_intel_encoder(conn_state->best_encoder);
1663 
1664 		if (conn_state->crtc != &crtc->base)
1665 			continue;
1666 
1667 		if (encoder->pre_pll_enable)
1668 			encoder->pre_pll_enable(state, encoder,
1669 						crtc_state, conn_state);
1670 	}
1671 }
1672 
1673 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
1674 				      struct intel_crtc *crtc)
1675 {
1676 	const struct intel_crtc_state *crtc_state =
1677 		intel_atomic_get_new_crtc_state(state, crtc);
1678 	const struct drm_connector_state *conn_state;
1679 	struct drm_connector *conn;
1680 	int i;
1681 
1682 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1683 		struct intel_encoder *encoder =
1684 			to_intel_encoder(conn_state->best_encoder);
1685 
1686 		if (conn_state->crtc != &crtc->base)
1687 			continue;
1688 
1689 		if (encoder->pre_enable)
1690 			encoder->pre_enable(state, encoder,
1691 					    crtc_state, conn_state);
1692 	}
1693 }
1694 
1695 static void intel_encoders_enable(struct intel_atomic_state *state,
1696 				  struct intel_crtc *crtc)
1697 {
1698 	const struct intel_crtc_state *crtc_state =
1699 		intel_atomic_get_new_crtc_state(state, crtc);
1700 	const struct drm_connector_state *conn_state;
1701 	struct drm_connector *conn;
1702 	int i;
1703 
1704 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1705 		struct intel_encoder *encoder =
1706 			to_intel_encoder(conn_state->best_encoder);
1707 
1708 		if (conn_state->crtc != &crtc->base)
1709 			continue;
1710 
1711 		if (encoder->enable)
1712 			encoder->enable(state, encoder,
1713 					crtc_state, conn_state);
1714 		intel_opregion_notify_encoder(encoder, true);
1715 	}
1716 }
1717 
1718 static void intel_encoders_disable(struct intel_atomic_state *state,
1719 				   struct intel_crtc *crtc)
1720 {
1721 	const struct intel_crtc_state *old_crtc_state =
1722 		intel_atomic_get_old_crtc_state(state, crtc);
1723 	const struct drm_connector_state *old_conn_state;
1724 	struct drm_connector *conn;
1725 	int i;
1726 
1727 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1728 		struct intel_encoder *encoder =
1729 			to_intel_encoder(old_conn_state->best_encoder);
1730 
1731 		if (old_conn_state->crtc != &crtc->base)
1732 			continue;
1733 
1734 		intel_opregion_notify_encoder(encoder, false);
1735 		if (encoder->disable)
1736 			encoder->disable(state, encoder,
1737 					 old_crtc_state, old_conn_state);
1738 	}
1739 }
1740 
1741 static void intel_encoders_post_disable(struct intel_atomic_state *state,
1742 					struct intel_crtc *crtc)
1743 {
1744 	const struct intel_crtc_state *old_crtc_state =
1745 		intel_atomic_get_old_crtc_state(state, crtc);
1746 	const struct drm_connector_state *old_conn_state;
1747 	struct drm_connector *conn;
1748 	int i;
1749 
1750 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1751 		struct intel_encoder *encoder =
1752 			to_intel_encoder(old_conn_state->best_encoder);
1753 
1754 		if (old_conn_state->crtc != &crtc->base)
1755 			continue;
1756 
1757 		if (encoder->post_disable)
1758 			encoder->post_disable(state, encoder,
1759 					      old_crtc_state, old_conn_state);
1760 	}
1761 }
1762 
1763 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
1764 					    struct intel_crtc *crtc)
1765 {
1766 	const struct intel_crtc_state *old_crtc_state =
1767 		intel_atomic_get_old_crtc_state(state, crtc);
1768 	const struct drm_connector_state *old_conn_state;
1769 	struct drm_connector *conn;
1770 	int i;
1771 
1772 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
1773 		struct intel_encoder *encoder =
1774 			to_intel_encoder(old_conn_state->best_encoder);
1775 
1776 		if (old_conn_state->crtc != &crtc->base)
1777 			continue;
1778 
1779 		if (encoder->post_pll_disable)
1780 			encoder->post_pll_disable(state, encoder,
1781 						  old_crtc_state, old_conn_state);
1782 	}
1783 }
1784 
1785 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
1786 				       struct intel_crtc *crtc)
1787 {
1788 	const struct intel_crtc_state *crtc_state =
1789 		intel_atomic_get_new_crtc_state(state, crtc);
1790 	const struct drm_connector_state *conn_state;
1791 	struct drm_connector *conn;
1792 	int i;
1793 
1794 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1795 		struct intel_encoder *encoder =
1796 			to_intel_encoder(conn_state->best_encoder);
1797 
1798 		if (conn_state->crtc != &crtc->base)
1799 			continue;
1800 
1801 		if (encoder->update_pipe)
1802 			encoder->update_pipe(state, encoder,
1803 					     crtc_state, conn_state);
1804 	}
1805 }
1806 
1807 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
1808 {
1809 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1810 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1811 
1812 	plane->disable_arm(plane, crtc_state);
1813 }
1814 
/*
 * Full mode-set enable sequence for an ILK-style (PCH) CRTC: program
 * timings/M-N/pipeconf, run the encoder pre-enable hooks, bring up the
 * FDI PLL and panel fitter, load the LUT, enable the transcoder and PCH,
 * then the encoders. The ordering of these steps is hardware-mandated;
 * do not reorder.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already-active crtc is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1902 
1903 /* IPS only exists on ULT machines and is tied to pipe A. */
1904 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
1905 {
1906 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
1907 }
1908 
1909 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
1910 					    enum pipe pipe, bool apply)
1911 {
1912 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
1913 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
1914 
1915 	if (apply)
1916 		val |= mask;
1917 	else
1918 		val &= ~mask;
1919 
1920 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
1921 }
1922 
1923 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
1924 {
1925 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1926 	enum pipe pipe = crtc->pipe;
1927 	u32 val;
1928 
1929 	/* Wa_22010947358:adl-p */
1930 	if (IS_ALDERLAKE_P(dev_priv))
1931 		val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
1932 	else
1933 		val = MBUS_DBOX_A_CREDIT(2);
1934 
1935 	if (DISPLAY_VER(dev_priv) >= 12) {
1936 		val |= MBUS_DBOX_BW_CREDIT(2);
1937 		val |= MBUS_DBOX_B_CREDIT(12);
1938 	} else {
1939 		val |= MBUS_DBOX_BW_CREDIT(1);
1940 		val |= MBUS_DBOX_B_CREDIT(8);
1941 	}
1942 
1943 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
1944 }
1945 
1946 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
1947 {
1948 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1949 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1950 
1951 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
1952 		       HSW_LINETIME(crtc_state->linetime) |
1953 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
1954 }
1955 
1956 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
1957 {
1958 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1959 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1960 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
1961 	u32 val;
1962 
1963 	val = intel_de_read(dev_priv, reg);
1964 	val &= ~HSW_FRAME_START_DELAY_MASK;
1965 	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
1966 	intel_de_write(dev_priv, reg, val);
1967 }
1968 
1969 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
1970 					 const struct intel_crtc_state *crtc_state)
1971 {
1972 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
1973 	struct intel_crtc_state *master_crtc_state;
1974 	struct intel_crtc *master_crtc;
1975 	struct drm_connector_state *conn_state;
1976 	struct drm_connector *conn;
1977 	struct intel_encoder *encoder = NULL;
1978 	int i;
1979 
1980 	master_crtc = intel_master_crtc(crtc_state);
1981 	master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
1982 
1983 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
1984 		if (conn_state->crtc != &master_crtc->base)
1985 			continue;
1986 
1987 		encoder = to_intel_encoder(conn_state->best_encoder);
1988 		break;
1989 	}
1990 
1991 	/*
1992 	 * Enable sequence steps 1-7 on bigjoiner master
1993 	 */
1994 	if (crtc_state->bigjoiner_slave)
1995 		intel_encoders_pre_pll_enable(state, master_crtc);
1996 
1997 	if (crtc_state->shared_dpll)
1998 		intel_enable_shared_dpll(crtc_state);
1999 
2000 	if (crtc_state->bigjoiner_slave)
2001 		intel_encoders_pre_enable(state, master_crtc);
2002 
2003 	/* need to enable VDSC, which we skipped in pre-enable */
2004 	intel_dsc_enable(crtc_state);
2005 
2006 	if (DISPLAY_VER(dev_priv) >= 13)
2007 		intel_uncompressed_joiner_enable(crtc_state);
2008 }
2009 
/*
 * Full modeset enable sequence for HSW+ (DDI based) crtcs, including
 * bigjoiner master/slave handling on ICL+. The step order follows the
 * hardware enable sequence and must not be rearranged.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/* Bigjoiner pipes defer to the bigjoiner-aware pre-enable path. */
	if (!new_crtc_state->bigjoiner) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Transcoder programming is skipped for slave pipes and DSI. */
	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
		intel_set_transcoder_timings(new_crtc_state);

		if (cpu_transcoder != TRANSCODER_EDP)
			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
				       new_crtc_state->pixel_multiplier - 1);

		if (new_crtc_state->has_pch_encoder)
			intel_cpu_transcoder_set_m_n(new_crtc_state,
						     &new_crtc_state->fdi_m_n, NULL);

		hsw_set_frame_start_delay(new_crtc_state);

		hsw_set_transconf(new_crtc_state);
	}

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 11) {
		const struct intel_dbuf_state *dbuf_state =
				intel_atomic_get_new_dbuf_state(state);

		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
	}

	/*
	 * NOTE(review): only slave pipes get their vblank enabled here -
	 * presumably the master's is handled elsewhere in the bigjoiner
	 * sequence; confirm against the bigjoiner enable flow.
	 */
	if (new_crtc_state->bigjoiner_slave)
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
2109 
/*
 * Disable the ILK-style panel fitter for the crtc by clearing its
 * control, window position and window size registers.
 */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (!old_crtc_state->pch_pfit.enabled)
		return;

	intel_de_write(dev_priv, PF_CTL(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
2125 
/*
 * Full modeset disable sequence for ILK-style crtcs: encoders first,
 * then vblank off, transcoder, pfit, PCH, and finally the encoder
 * post-disable hooks. Underrun reporting is suppressed for the duration.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	/* Re-enable the underrun reporting suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
2161 
/*
 * Modeset disable for HSW+ crtcs. Only the encoder disable hooks run
 * here; for bigjoiner slaves even those are skipped (the master's
 * encoders cover both pipes).
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	if (!old_crtc_state->bigjoiner_slave) {
		intel_encoders_disable(state, crtc);
		intel_encoders_post_disable(state, crtc);
	}
}
2177 
/*
 * Enable the GMCH panel fitter for the crtc. Must be called while the
 * pfit and the transcoder are still disabled (asserted below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do when the fitter isn't used by this state. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
2202 
2203 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
2204 {
2205 	if (phy == PHY_NONE)
2206 		return false;
2207 	else if (IS_DG2(dev_priv))
2208 		/*
2209 		 * DG2 outputs labelled as "combo PHY" in the bspec use
2210 		 * SNPS PHYs with completely different programming,
2211 		 * hence we always return false here.
2212 		 */
2213 		return false;
2214 	else if (IS_ALDERLAKE_S(dev_priv))
2215 		return phy <= PHY_E;
2216 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
2217 		return phy <= PHY_D;
2218 	else if (IS_JSL_EHL(dev_priv))
2219 		return phy <= PHY_C;
2220 	else if (DISPLAY_VER(dev_priv) >= 11)
2221 		return phy <= PHY_B;
2222 	else
2223 		return false;
2224 }
2225 
2226 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
2227 {
2228 	if (IS_DG2(dev_priv))
2229 		/* DG2's "TC1" output uses a SNPS PHY */
2230 		return false;
2231 	else if (IS_ALDERLAKE_P(dev_priv))
2232 		return phy >= PHY_F && phy <= PHY_I;
2233 	else if (IS_TIGERLAKE(dev_priv))
2234 		return phy >= PHY_D && phy <= PHY_I;
2235 	else if (IS_ICELAKE(dev_priv))
2236 		return phy >= PHY_C && phy <= PHY_F;
2237 	else
2238 		return false;
2239 }
2240 
2241 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
2242 {
2243 	if (phy == PHY_NONE)
2244 		return false;
2245 	else if (IS_DG2(dev_priv))
2246 		/*
2247 		 * All four "combo" ports and the TC1 port (PHY E) use
2248 		 * Synopsis PHYs.
2249 		 */
2250 		return phy <= PHY_E;
2251 
2252 	return false;
2253 }
2254 
/*
 * Map a DDI port to the PHY that drives it. The platform checks run
 * newest first; the per-platform cases differ in which PHY the first
 * Type-C port maps to.
 */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
		return PHY_D + port - PORT_D_XELPD;
	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
		return PHY_F + port - PORT_TC1;
	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
		return PHY_B + port - PORT_TC1;
	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
		return PHY_C + port - PORT_TC1;
	else if (IS_JSL_EHL(i915) && port == PORT_D)
		/* JSL/EHL wire port D to PHY A */
		return PHY_A;

	/* Default: ports map 1:1 onto PHYs starting at PHY A. */
	return PHY_A + port - PORT_A;
}
2270 
2271 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
2272 {
2273 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
2274 		return TC_PORT_NONE;
2275 
2276 	if (DISPLAY_VER(dev_priv) >= 12)
2277 		return TC_PORT_1 + port - PORT_TC1;
2278 	else
2279 		return TC_PORT_1 + port - PORT_C;
2280 }
2281 
/*
 * Map a DDI port to its lane power domain. Unknown ports are flagged
 * via MISSING_CASE() and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
2308 
/*
 * Return the AUX power domain for @dig_port. Ports in Thunderbolt alt
 * mode use the dedicated *_TBT domains; everything else goes through
 * the legacy aux_ch -> power domain mapping.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
2336 
2337 /*
2338  * Converts aux_ch to power_domain without caring about TBT ports for that use
2339  * intel_aux_power_domain()
2340  */
2341 enum intel_display_power_domain
2342 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
2343 {
2344 	switch (aux_ch) {
2345 	case AUX_CH_A:
2346 		return POWER_DOMAIN_AUX_A;
2347 	case AUX_CH_B:
2348 		return POWER_DOMAIN_AUX_B;
2349 	case AUX_CH_C:
2350 		return POWER_DOMAIN_AUX_C;
2351 	case AUX_CH_D:
2352 		return POWER_DOMAIN_AUX_D;
2353 	case AUX_CH_E:
2354 		return POWER_DOMAIN_AUX_E;
2355 	case AUX_CH_F:
2356 		return POWER_DOMAIN_AUX_F;
2357 	case AUX_CH_G:
2358 		return POWER_DOMAIN_AUX_G;
2359 	case AUX_CH_H:
2360 		return POWER_DOMAIN_AUX_H;
2361 	case AUX_CH_I:
2362 		return POWER_DOMAIN_AUX_I;
2363 	default:
2364 		MISSING_CASE(aux_ch);
2365 		return POWER_DOMAIN_AUX_A;
2366 	}
2367 }
2368 
/*
 * Compute the bitmask of power domains needed by @crtc_state: pipe and
 * transcoder domains, panel fitter, each enabled encoder's domain, and
 * conditionally audio, display core (for a shared DPLL) and DSC.
 * Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;

	if (!crtc_state->hw.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* One domain per encoder feeding this crtc. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));

	return mask;
}
2405 
2406 static u64
2407 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
2408 {
2409 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2410 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2411 	enum intel_display_power_domain domain;
2412 	u64 domains, new_domains, old_domains;
2413 
2414 	domains = get_crtc_power_domains(crtc_state);
2415 
2416 	new_domains = domains & ~crtc->enabled_power_domains.mask;
2417 	old_domains = crtc->enabled_power_domains.mask & ~domains;
2418 
2419 	for_each_power_domain(domain, new_domains)
2420 		intel_display_power_get_in_set(dev_priv,
2421 					       &crtc->enabled_power_domains,
2422 					       domain);
2423 
2424 	return old_domains;
2425 }
2426 
/*
 * Release the power domain references in @domains, typically the mask
 * returned by modeset_get_crtc_power_domains().
 */
static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					   u64 domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
					    domains);
}
2434 
/*
 * Full modeset enable sequence for VLV/CHV crtcs. Step order follows
 * the hardware enable sequence and must not be rearranged.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* CHV pipe B: use legacy blending, black canvas. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
2486 
/*
 * Full modeset enable sequence for gen2-4 (GMCH) crtcs. Step order
 * follows the hardware enable sequence and must not be rearranged.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_transcoder_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* gen2 has no underrun reporting. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);
}
2534 
/*
 * Disable the GMCH panel fitter. Must be called with the transcoder
 * already disabled (asserted below).
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
2549 
/*
 * Full modeset disable sequence for gen2-4 (GMCH) crtcs: encoders,
 * vblank, transcoder, pfit, then the PLL (unless DSI owns it). Step
 * order follows the hardware disable sequence.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI keeps the PLL running; otherwise shut it down. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* gen2 has no underrun reporting. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->wm_disp->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
2596 
/*
 * Disable a crtc outside of a regular atomic commit (eg. during initial
 * hw state takeover/sanitization), then scrub the crtc's software state
 * and the global cdclk/dbuf/bandwidth bookkeeping to match.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off every plane that is currently visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* Build a throwaway atomic state just to drive the disable hook. */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Clear out the crtc's uapi and hw state. */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(dev_priv);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Scrub this pipe from the global state objects. */
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
2680 
2681 /*
2682  * turn all crtc's off, but do not adjust state
2683  * This has to be paired with a call to intel_modeset_setup_hw_state.
2684  */
2685 int intel_display_suspend(struct drm_device *dev)
2686 {
2687 	struct drm_i915_private *dev_priv = to_i915(dev);
2688 	struct drm_atomic_state *state;
2689 	int ret;
2690 
2691 	if (!HAS_DISPLAY(dev_priv))
2692 		return 0;
2693 
2694 	state = drm_atomic_helper_suspend(dev);
2695 	ret = PTR_ERR_OR_ZERO(state);
2696 	if (ret)
2697 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
2698 			ret);
2699 	else
2700 		dev_priv->modeset_restore_state = state;
2701 	return ret;
2702 }
2703 
/* Clean up and free the intel_encoder wrapping @encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
2711 
/*
 * Cross check the actual hw state with our own modeset state tracking
 * (and its internal consistency): the connector's hw enable state must
 * agree with the attached crtc/encoder software state. Only warns, never
 * changes any state.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* MST encoders are bound to a crtc dynamically; skip them. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
2750 
/*
 * Can IPS (Intermediate Pixel Storage) be used with this crtc state?
 * Checks the hardware capability, the module parameter, the pipe bpp
 * limit and (on BDW) the cdclk headroom requirement.
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* Respect the i915.enable_ips module parameter. */
	if (!dev_priv->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
2779 
/*
 * Decide whether IPS should be enabled for @crtc_state and set
 * crtc_state->ips_enabled accordingly. Returns 0 on success or a
 * negative error code (from fetching the cdclk state on BDW).
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
2821 
2822 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
2823 {
2824 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2825 
2826 	/* GDG double wide on either pipe, otherwise pipe A only */
2827 	return DISPLAY_VER(dev_priv) < 4 &&
2828 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
2829 }
2830 
2831 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
2832 {
2833 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
2834 	struct drm_rect src;
2835 
2836 	/*
2837 	 * We only use IF-ID interlacing. If we ever use
2838 	 * PF-ID we'll need to adjust the pixel_rate here.
2839 	 */
2840 
2841 	if (!crtc_state->pch_pfit.enabled)
2842 		return pixel_rate;
2843 
2844 	drm_rect_init(&src, 0, 0,
2845 		      crtc_state->pipe_src_w << 16,
2846 		      crtc_state->pipe_src_h << 16);
2847 
2848 	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
2849 				   pixel_rate);
2850 }
2851 
2852 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
2853 					 const struct drm_display_mode *timings)
2854 {
2855 	mode->hdisplay = timings->crtc_hdisplay;
2856 	mode->htotal = timings->crtc_htotal;
2857 	mode->hsync_start = timings->crtc_hsync_start;
2858 	mode->hsync_end = timings->crtc_hsync_end;
2859 
2860 	mode->vdisplay = timings->crtc_vdisplay;
2861 	mode->vtotal = timings->crtc_vtotal;
2862 	mode->vsync_start = timings->crtc_vsync_start;
2863 	mode->vsync_end = timings->crtc_vsync_end;
2864 
2865 	mode->flags = timings->flags;
2866 	mode->type = DRM_MODE_TYPE_DRIVER;
2867 
2868 	mode->clock = timings->crtc_clock;
2869 
2870 	drm_mode_set_name(mode);
2871 }
2872 
2873 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
2874 {
2875 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2876 
2877 	if (HAS_GMCH(dev_priv))
2878 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
2879 		crtc_state->pixel_rate =
2880 			crtc_state->hw.pipe_mode.crtc_clock;
2881 	else
2882 		crtc_state->pixel_rate =
2883 			ilk_pipe_pixel_rate(crtc_state);
2884 }
2885 
/*
 * Derive hw.pipe_mode and hw.mode from the already-read-out
 * hw.adjusted_mode, applying the bigjoiner halving and the eDP MSO
 * splitter expansion so the derived modes describe what is actually
 * scanned out. Also recomputes crtc_state->pixel_rate.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/* Start from the transcoder timings and adjust below. */
	drm_mode_copy(pipe_mode, adjusted_mode);

	if (crtc_state->bigjoiner) {
		/*
		 * transcoder is programmed to the full mode,
		 * but pipe timings are half of the transcoder mode
		 */
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_mode->crtc_clock /= 2;
	}

	if (crtc_state->splitter.enable) {
		int n = crtc_state->splitter.link_count;
		int overlap = crtc_state->splitter.pixel_overlap;

		/*
		 * eDP MSO uses segment timings from EDID for transcoder
		 * timings, but full mode for everything else.
		 *
		 * h_full = (h_segment - pixel_overlap) * link_count
		 */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;

		/* With MSO the adjusted mode also gets the expanded timings. */
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
	} else {
		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
	}

	intel_crtc_compute_pixel_rate(crtc_state);

	/* User-visible mode: pipe source size, doubled for bigjoiner. */
	drm_mode_copy(mode, adjusted_mode);
	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
	mode->vdisplay = crtc_state->pipe_src_h;
}
2939 
2940 static void intel_encoder_get_config(struct intel_encoder *encoder,
2941 				     struct intel_crtc_state *crtc_state)
2942 {
2943 	encoder->get_config(encoder, crtc_state);
2944 
2945 	intel_crtc_readout_derived_state(crtc_state);
2946 }
2947 
/*
 * Validate/adjust the crtc state during atomic check: derives
 * hw.pipe_mode from hw.adjusted_mode (accounting for bigjoiner and the
 * eDP MSO splitter), checks the dot clock against platform limits,
 * enforces the even-source-width restrictions, computes the pixel rate,
 * and finally runs FDI computation for PCH encoders. Returns 0 on
 * success or -EINVAL when the mode can't be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);

	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
	if (pipe_config->bigjoiner) {
		pipe_mode->crtc_clock /= 2;
		pipe_mode->crtc_hdisplay /= 2;
		pipe_mode->crtc_hblank_start /= 2;
		pipe_mode->crtc_hblank_end /= 2;
		pipe_mode->crtc_hsync_start /= 2;
		pipe_mode->crtc_hsync_end /= 2;
		pipe_mode->crtc_htotal /= 2;
		pipe_config->pipe_src_w /= 2;
	}

	if (pipe_config->splitter.enable) {
		int n = pipe_config->splitter.link_count;
		int overlap = pipe_config->splitter.pixel_overlap;

		/* h_full = (h_segment - pixel_overlap) * link_count */
		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
		pipe_mode->crtc_clock *= n;
	}

	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    pipe_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* FDI link bandwidth needs its own validation pass. */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
3034 
3035 static void
3036 intel_reduce_m_n_ratio(u32 *num, u32 *den)
3037 {
3038 	while (*num > DATA_LINK_M_N_MASK ||
3039 	       *den > DATA_LINK_M_N_MASK) {
3040 		*num >>= 1;
3041 		*den >>= 1;
3042 	}
3043 }
3044 
3045 static void compute_m_n(unsigned int m, unsigned int n,
3046 			u32 *ret_m, u32 *ret_n,
3047 			bool constant_n)
3048 {
3049 	/*
3050 	 * Several DP dongles in particular seem to be fussy about
3051 	 * too large link M/N values. Give N value as 0x8000 that
3052 	 * should be acceptable by specific devices. 0x8000 is the
3053 	 * specified fixed N value for asynchronous clock mode,
3054 	 * which the devices expect also in synchronous clock mode.
3055 	 */
3056 	if (constant_n)
3057 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
3058 	else
3059 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
3060 
3061 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
3062 	intel_reduce_m_n_ratio(ret_m, ret_n);
3063 }
3064 
3065 void
3066 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
3067 		       int pixel_clock, int link_clock,
3068 		       struct intel_link_m_n *m_n,
3069 		       bool constant_n, bool fec_enable)
3070 {
3071 	u32 data_clock = bits_per_pixel * pixel_clock;
3072 
3073 	if (fec_enable)
3074 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
3075 
3076 	m_n->tu = 64;
3077 	compute_m_n(data_clock,
3078 		    link_clock * nlanes * 8,
3079 		    &m_n->gmch_m, &m_n->gmch_n,
3080 		    constant_n);
3081 
3082 	compute_m_n(pixel_clock, link_clock,
3083 		    &m_n->link_m, &m_n->link_n,
3084 		    constant_n);
3085 }
3086 
3087 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
3088 {
3089 	/*
3090 	 * There may be no VBT; and if the BIOS enabled SSC we can
3091 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
3092 	 * BIOS isn't using it, don't assume it will work even if the VBT
3093 	 * indicates as much.
3094 	 */
3095 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
3096 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
3097 						       PCH_DREF_CONTROL) &
3098 			DREF_SSC1_ENABLE;
3099 
3100 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
3101 			drm_dbg_kms(&dev_priv->drm,
3102 				    "SSC %s by BIOS, overriding VBT which says %s\n",
3103 				    enableddisabled(bios_lvds_use_ssc),
3104 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
3105 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
3106 		}
3107 	}
3108 }
3109 
/*
 * Program the PCH transcoder data/link M1/N1 registers for this pipe.
 * The TU size is packed into the high bits of the DATA_M register.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
3123 
3124 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
3125 				 enum transcoder transcoder)
3126 {
3127 	if (IS_HASWELL(dev_priv))
3128 		return transcoder == TRANSCODER_EDP;
3129 
3130 	/*
3131 	 * Strictly speaking some registers are available before
3132 	 * gen7, but we only support DRRS on gen7+
3133 	 */
3134 	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
3135 }
3136 
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are indexed by transcoder; older (g4x-style) hardware
 * indexes them by pipe. The optional @m2_n2 set is written only when
 * DRRS is in use and the transcoder actually has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* Pre-ilk: per-pipe registers, single M/N set only. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
3178 
3179 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
3180 {
3181 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
3182 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3183 
3184 	if (m_n == M1_N1) {
3185 		dp_m_n = &crtc_state->dp_m_n;
3186 		dp_m2_n2 = &crtc_state->dp_m2_n2;
3187 	} else if (m_n == M2_N2) {
3188 
3189 		/*
3190 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
3191 		 * needs to be programmed into M1_N1.
3192 		 */
3193 		dp_m_n = &crtc_state->dp_m2_n2;
3194 	} else {
3195 		drm_err(&i915->drm, "Unsupported divider value\n");
3196 		return;
3197 	}
3198 
3199 	if (crtc_state->has_pch_encoder)
3200 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
3201 	else
3202 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
3203 }
3204 
/*
 * Program the transcoder timing registers (H/V TOTAL, BLANK, SYNC and
 * VSYNCSHIFT) from the adjusted mode. All register fields store
 * value-minus-one, with the "end"/"total" half packed into bits 31:16.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Keep the shift within one scanline. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+. */
	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
		               vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
3262 
3263 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
3264 {
3265 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3266 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3267 	enum pipe pipe = crtc->pipe;
3268 
3269 	/* pipesrc controls the size that is scaled from, which should
3270 	 * always be the user's requested size.
3271 	 */
3272 	intel_de_write(dev_priv, PIPESRC(pipe),
3273 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
3274 }
3275 
3276 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
3277 {
3278 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3279 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3280 
3281 	if (DISPLAY_VER(dev_priv) == 2)
3282 		return false;
3283 
3284 	if (DISPLAY_VER(dev_priv) >= 9 ||
3285 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
3286 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
3287 	else
3288 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
3289 }
3290 
/*
 * Read back the transcoder timing registers into the adjusted mode.
 * Register fields store value-minus-one; the low 16 bits hold the
 * "start"/"display" value and bits 31:16 the "end"/"total" value.
 * DSI transcoders have no BLANK registers, so those fields are skipped.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* Undo the halfline adjustment applied when programming. */
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
3335 
3336 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
3337 				    struct intel_crtc_state *pipe_config)
3338 {
3339 	struct drm_device *dev = crtc->base.dev;
3340 	struct drm_i915_private *dev_priv = to_i915(dev);
3341 	u32 tmp;
3342 
3343 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
3344 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
3345 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
3346 }
3347 
/*
 * Program PIPECONF for gmch-style (pre-ilk / VLV / CHV) pipes from the
 * crtc state: double wide, dither/bpc, interlace mode, color range,
 * gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Gen3 and earlier, and SDVO, need the field indication mode. */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
3408 
3409 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
3410 {
3411 	if (IS_I830(dev_priv))
3412 		return false;
3413 
3414 	return DISPLAY_VER(dev_priv) >= 4 ||
3415 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
3416 }
3417 
/*
 * Read back the gmch panel fitter state, but only when the fitter is
 * enabled and attached to this crtc's pipe; otherwise the state is
 * left untouched.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		/* Pre-gen4 the pfit is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
3444 
/*
 * Read the VLV DPLL dividers via the DPIO sideband and compute
 * pipe_config->port_clock from them (100 MHz reference, kHz units).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the divider fields from the single PLL dword. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
3471 
/*
 * Read the CHV DPLL dividers via the DPIO sideband and compute
 * pipe_config->port_clock from them (100 MHz reference, kHz units).
 * CHV spreads the dividers over several PLL/CMN dwords and supports a
 * fractional M2 divider.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* M2 integer part in 10.22 fixed point; add the fraction if enabled. */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
3505 
3506 static enum intel_output_format
3507 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
3508 {
3509 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3510 	u32 tmp;
3511 
3512 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
3513 
3514 	if (tmp & PIPEMISC_YUV420_ENABLE) {
3515 		/* We support 4:2:0 in full blend mode only */
3516 		drm_WARN_ON(&dev_priv->drm,
3517 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
3518 
3519 		return INTEL_OUTPUT_FORMAT_YCBCR420;
3520 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
3521 		return INTEL_OUTPUT_FORMAT_YCBCR444;
3522 	} else {
3523 		return INTEL_OUTPUT_FORMAT_RGB;
3524 	}
3525 }
3526 
3527 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
3528 {
3529 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3530 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3531 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3532 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
3533 	u32 tmp;
3534 
3535 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
3536 
3537 	if (tmp & DISPPLANE_GAMMA_ENABLE)
3538 		crtc_state->gamma_enable = true;
3539 
3540 	if (!HAS_GMCH(dev_priv) &&
3541 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
3542 		crtc_state->csc_enable = true;
3543 }
3544 
/*
 * Read out the full hw pipe state for gmch-style (pre-ilk / VLV / CHV)
 * pipes. Returns true and fills @pipe_config when the pipe's power
 * domain is up and the pipe is enabled, false otherwise. The whole
 * readout is bracketed by a power domain reference.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* On these platforms the transcoder maps 1:1 to the pipe. */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only g4x and later have the bpc field in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3666 
/*
 * Program PIPECONF for ILK-style pipes from the crtc state: bpc,
 * dithering, interlace mode, color range, output colorspace, gamma
 * mode and frame start delay.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* SDVO handles the limited range in the encoder instead. */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
3723 
3724 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
3725 {
3726 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3727 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3728 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
3729 	u32 val = 0;
3730 
3731 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
3732 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
3733 
3734 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
3735 		val |= PIPECONF_INTERLACED_ILK;
3736 	else
3737 		val |= PIPECONF_PROGRESSIVE;
3738 
3739 	if (IS_HASWELL(dev_priv) &&
3740 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
3741 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
3742 
3743 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
3744 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
3745 }
3746 
3747 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
3748 {
3749 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3750 	const struct intel_crtc_scaler_state *scaler_state =
3751 		&crtc_state->scaler_state;
3752 
3753 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3754 	u32 val = 0;
3755 	int i;
3756 
3757 	switch (crtc_state->pipe_bpp) {
3758 	case 18:
3759 		val |= PIPEMISC_6_BPC;
3760 		break;
3761 	case 24:
3762 		val |= PIPEMISC_8_BPC;
3763 		break;
3764 	case 30:
3765 		val |= PIPEMISC_10_BPC;
3766 		break;
3767 	case 36:
3768 		/* Port output 12BPC defined for ADLP+ */
3769 		if (DISPLAY_VER(dev_priv) > 12)
3770 			val |= PIPEMISC_12_BPC_ADLP;
3771 		break;
3772 	default:
3773 		MISSING_CASE(crtc_state->pipe_bpp);
3774 		break;
3775 	}
3776 
3777 	if (crtc_state->dither)
3778 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
3779 
3780 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
3781 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
3782 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
3783 
3784 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3785 		val |= PIPEMISC_YUV420_ENABLE |
3786 			PIPEMISC_YUV420_MODE_FULL_BLEND;
3787 
3788 	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
3789 		val |= PIPEMISC_HDR_MODE_PRECISION;
3790 
3791 	if (DISPLAY_VER(dev_priv) >= 12)
3792 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
3793 
3794 	if (IS_ALDERLAKE_P(dev_priv)) {
3795 		bool scaler_in_use = false;
3796 
3797 		for (i = 0; i < crtc->num_scalers; i++) {
3798 			if (!scaler_state->scalers[i].in_use)
3799 				continue;
3800 
3801 			scaler_in_use = true;
3802 			break;
3803 		}
3804 
3805 		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
3806 			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
3807 			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
3808 			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
3809 	}
3810 
3811 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
3812 }
3813 
/*
 * Read back pipe_bpp from the PIPEMISC bpc field.
 *
 * Returns the bpp (3 * bpc) for the current PIPEMISC programming,
 * or 0 for an unknown/unsupported encoding.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_6_BPC:
		return 18;
	case PIPEMISC_8_BPC:
		return 24;
	case PIPEMISC_10_BPC:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_12_BPC_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		/* pre-ADLP: this encoding is invalid, treat as unknown */
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
3847 
3848 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
3849 {
3850 	/*
3851 	 * Account for spread spectrum to avoid
3852 	 * oversubscribing the link. Max center spread
3853 	 * is 2.5%; use 5% for safety's sake.
3854 	 */
3855 	u32 bps = target_clock * bpp * 21 / 20;
3856 	return DIV_ROUND_UP(bps, link_bw * 8);
3857 }
3858 
/*
 * Read back the link and data M1/N1 values (plus TU size) from the
 * PCH transcoder registers for @crtc's pipe.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	/* the data M register carries the TU size in its masked-off bits */
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	/* hw stores TU size minus one */
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
3874 
3875 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
3876 					 enum transcoder transcoder,
3877 					 struct intel_link_m_n *m_n,
3878 					 struct intel_link_m_n *m2_n2)
3879 {
3880 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3881 	enum pipe pipe = crtc->pipe;
3882 
3883 	if (DISPLAY_VER(dev_priv) >= 5) {
3884 		m_n->link_m = intel_de_read(dev_priv,
3885 					    PIPE_LINK_M1(transcoder));
3886 		m_n->link_n = intel_de_read(dev_priv,
3887 					    PIPE_LINK_N1(transcoder));
3888 		m_n->gmch_m = intel_de_read(dev_priv,
3889 					    PIPE_DATA_M1(transcoder))
3890 			& ~TU_SIZE_MASK;
3891 		m_n->gmch_n = intel_de_read(dev_priv,
3892 					    PIPE_DATA_N1(transcoder));
3893 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
3894 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3895 
3896 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
3897 			m2_n2->link_m = intel_de_read(dev_priv,
3898 						      PIPE_LINK_M2(transcoder));
3899 			m2_n2->link_n =	intel_de_read(dev_priv,
3900 							     PIPE_LINK_N2(transcoder));
3901 			m2_n2->gmch_m =	intel_de_read(dev_priv,
3902 							     PIPE_DATA_M2(transcoder))
3903 					& ~TU_SIZE_MASK;
3904 			m2_n2->gmch_n =	intel_de_read(dev_priv,
3905 							     PIPE_DATA_N2(transcoder));
3906 			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
3907 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3908 		}
3909 	} else {
3910 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
3911 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
3912 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3913 			& ~TU_SIZE_MASK;
3914 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
3915 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
3916 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
3917 	}
3918 }
3919 
3920 void intel_dp_get_m_n(struct intel_crtc *crtc,
3921 		      struct intel_crtc_state *pipe_config)
3922 {
3923 	if (pipe_config->has_pch_encoder)
3924 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
3925 	else
3926 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
3927 					     &pipe_config->dp_m_n,
3928 					     &pipe_config->dp_m2_n2);
3929 }
3930 
/* Read back the FDI link M/N values; FDI has no second M2/N2 set. */
void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
			    struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
3937 
3938 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
3939 				  u32 pos, u32 size)
3940 {
3941 	drm_rect_init(&crtc_state->pch_pfit.dst,
3942 		      pos >> 16, pos & 0xffff,
3943 		      size >> 16, size & 0xffff);
3944 }
3945 
/*
 * Read back the pipe scaler ("pfit") configuration on SKL+.
 *
 * Finds the first scaler bound to the pipe itself (enabled and not
 * assigned to a plane), records its window in pch_pfit, and updates
 * the crtc scaler bookkeeping accordingly.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		/* must be enabled with no plane selected (pipe scaling) */
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* record (or clear) this crtc as a scaler user */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
3980 
/*
 * Read back the panel fitter configuration on ILK-style hardware
 * (dedicated PF_CTL/PF_WIN registers rather than SKL pipe scalers).
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignements of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
4006 
/*
 * Read out the full crtc hw state for an ILK-style (PCH) pipe.
 *
 * Returns true if the pipe is enabled and @pipe_config was filled in.
 * The pipe power domain is held for the duration of the readout.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* transcoders map 1:1 to pipes on this hardware */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4085 
4086 static u8 bigjoiner_pipes(struct drm_i915_private *i915)
4087 {
4088 	if (DISPLAY_VER(i915) >= 12)
4089 		return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
4090 	else if (DISPLAY_VER(i915) >= 11)
4091 		return BIT(PIPE_B) | BIT(PIPE_C);
4092 	else
4093 		return 0;
4094 }
4095 
/*
 * Check whether the DDI function of @cpu_transcoder is enabled.
 *
 * TRANS_DDI_FUNC_CTL is read only while the transcoder's power domain
 * is up; an unpowered transcoder is reported as disabled (tmp stays 0).
 */
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}
4110 
/*
 * Build the mask of pipes currently acting as bigjoiner slaves.
 *
 * Scans every bigjoiner-capable pipe's DSS control register (and, on
 * DISPLAY_VER >= 13, the uncompressed joiner bits) and warns if the
 * master/slave masks are not the expected consecutive pairs.
 */
static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
{
	u8 master_pipes = 0, slave_pipes = 0;
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
			continue;

		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			/*
			 * NOTE(review): assuming with_intel_display_power_if_enabled()
			 * is a for-loop macro, this 'continue' only exits the power
			 * block and falls through to the version check below rather
			 * than continuing the crtc loop — confirm that is the intent.
			 */
			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				master_pipes |= BIT(pipe);
			else
				slave_pipes |= BIT(pipe);
		}

		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 master_pipes, slave_pipes);

	return slave_pipes;
}
4158 
4159 static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
4160 {
4161 	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
4162 
4163 	if (DISPLAY_VER(i915) >= 11)
4164 		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
4165 
4166 	return panel_transcoder_mask;
4167 }
4168 
/*
 * Build the mask of all enabled CPU transcoders currently feeding
 * @crtc's pipe: panel transcoders (eDP, DSI on ICL+) routed to this
 * pipe, the pipe's own transcoder, and - when this pipe is a
 * bigjoiner slave - the master pipe's transcoder.
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/* map the transcoder's EDP input selection to a pipe */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder) crtc->pipe - 1;
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}
4234 
4235 static bool has_edp_transcoders(u8 enabled_transcoders)
4236 {
4237 	return enabled_transcoders & BIT(TRANSCODER_EDP);
4238 }
4239 
4240 static bool has_dsi_transcoders(u8 enabled_transcoders)
4241 {
4242 	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
4243 				      BIT(TRANSCODER_DSI_1));
4244 }
4245 
4246 static bool has_pipe_transcoders(u8 enabled_transcoders)
4247 {
4248 	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
4249 				       BIT(TRANSCODER_DSI_0) |
4250 				       BIT(TRANSCODER_DSI_1));
4251 }
4252 
/*
 * Sanity-check a readout transcoder mask: at most one *type* of
 * transcoder (eDP/DSI/pipe) may be enabled, and only DSI transcoders
 * may appear more than once (ganged dual-link).
 */
static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	/* the bools sum as 0/1 ints, so > 1 means mixed types */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	/* a single set bit is a power of two; multiple bits must be DSI */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}
4267 
/*
 * Determine which CPU transcoder drives @crtc and whether it is
 * enabled.
 *
 * Fills pipe_config->cpu_transcoder (and pch_pfit.force_thru for the
 * eDP-transcoder-on-pipe-A case), grabbing the transcoder power
 * domain into @power_domain_set. Returns true if the transcoder's
 * PIPECONF reports the pipe enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		/* eDP transcoder forced through pipe A pfit */
		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
4305 
/*
 * Check the BXT/GLK DSI transcoders (ports A and C) for one that is
 * enabled and routed to @crtc's pipe.
 *
 * On a match, sets pipe_config->cpu_transcoder to the DSI transcoder
 * and returns true; the transcoder's power domain is collected into
 * @power_domain_set.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* transcoder must be routed to this crtc's pipe */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
4351 
/*
 * Read out the full crtc hw state for a HSW+ (DDI) pipe.
 *
 * Returns true if the pipe is active. All power domains needed for
 * the readout are collected in @power_domain_set and released before
 * returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* BXT/GLK DSI transcoders are separate from the DDI ones */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	intel_dsc_get_config(pipe_config);
	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
		intel_uncompressed_joiner_get_config(pipe_config);

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* output colorspace: PIPECONF on HSW, PIPEMISC on BDW+ */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* panel fitter readout needs its own power domain */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		/* hw stores the multiplier minus one */
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
4465 
/*
 * Read out the hw state of @crtc_state's crtc via the platform
 * get_pipe_config() hook and fill in the derived software state.
 *
 * Returns false when the pipe is not active.
 */
static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!i915->display->get_pipe_config(crtc, crtc_state))
		return false;

	crtc_state->hw.active = true;

	intel_crtc_readout_derived_state(crtc_state);

	return true;
}
4480 
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
4486 
4487 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
4488 					struct drm_crtc *crtc)
4489 {
4490 	struct drm_plane *plane;
4491 	struct drm_plane_state *plane_state;
4492 	int ret, i;
4493 
4494 	ret = drm_atomic_add_affected_planes(state, crtc);
4495 	if (ret)
4496 		return ret;
4497 
4498 	for_each_new_plane_in_state(state, plane, plane_state, i) {
4499 		if (plane_state->crtc != crtc)
4500 			continue;
4501 
4502 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
4503 		if (ret)
4504 			return ret;
4505 
4506 		drm_atomic_set_fb_for_plane(plane_state, NULL);
4507 	}
4508 
4509 	return 0;
4510 }
4511 
/*
 * Route @connector to a pipe and light it up with a fixed 640x480 mode
 * so the encoder can perform load detection.
 *
 * On success, old->restore_state holds the atomic state needed to undo
 * this via intel_release_load_detect_pipe().
 *
 * Returns true on success, false on failure, or -EDEADLK when the
 * caller must back off and retry its locking sequence (note: declared
 * int so the error code fits alongside the bool-ish results).
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* already in use -> drop the lock and keep looking */
		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	/* one state to light up the pipe, one to restore the original setup */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* keep all planes off while load-detecting */
	ret = intel_modeset_disable_planes(state, &crtc->base);
	if (ret)
		goto fail;

	/* snapshot current connector/crtc/plane state for later restore */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* deadlock -> propagate so the caller can back off and retry */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
4666 
4667 void intel_release_load_detect_pipe(struct drm_connector *connector,
4668 				    struct intel_load_detect_pipe *old,
4669 				    struct drm_modeset_acquire_ctx *ctx)
4670 {
4671 	struct intel_encoder *intel_encoder =
4672 		intel_attached_encoder(to_intel_connector(connector));
4673 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
4674 	struct drm_encoder *encoder = &intel_encoder->base;
4675 	struct drm_atomic_state *state = old->restore_state;
4676 	int ret;
4677 
4678 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4679 		    connector->base.id, connector->name,
4680 		    encoder->base.id, encoder->name);
4681 
4682 	if (!state)
4683 		return;
4684 
4685 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
4686 	if (ret)
4687 		drm_dbg_kms(&i915->drm,
4688 			    "Couldn't release load detect pipe: %i\n", ret);
4689 	drm_atomic_state_put(state);
4690 }
4691 
4692 static int i9xx_pll_refclk(struct drm_device *dev,
4693 			   const struct intel_crtc_state *pipe_config)
4694 {
4695 	struct drm_i915_private *dev_priv = to_i915(dev);
4696 	u32 dpll = pipe_config->dpll_hw_state.dpll;
4697 
4698 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
4699 		return dev_priv->vbt.lvds_ssc_freq;
4700 	else if (HAS_PCH_SPLIT(dev_priv))
4701 		return 120000;
4702 	else if (DISPLAY_VER(dev_priv) != 2)
4703 		return 96000;
4704 	else
4705 		return 48000;
4706 }
4707 
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divider register (FP0/FP1) the DPLL is using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* Pineview encodes N as a one-hot field, hence the ffs(). */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		/* P1 is a one-hot field here; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Bail out; port_clock stays unset on this path. */
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2 has a different divider layout, with a special LVDS case. */
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
4800 
/*
 * intel_dotclock_calculate - derive the pixel clock from link M/N values
 * @link_freq: link clock frequency
 * @m_n: link M/N divider values
 *
 * Returns the computed clock, or 0 when link_n is zero (link unprogrammed).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	/* Guard the division below against an unprogrammed link. */
	if (!m_n->link_n)
		return 0;

	/* 64-bit intermediate so link_m * link_freq cannot overflow. */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
4819 
4820 /* Returns the currently programmed mode of the given encoder. */
4821 struct drm_display_mode *
4822 intel_encoder_current_mode(struct intel_encoder *encoder)
4823 {
4824 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4825 	struct intel_crtc_state *crtc_state;
4826 	struct drm_display_mode *mode;
4827 	struct intel_crtc *crtc;
4828 	enum pipe pipe;
4829 
4830 	if (!encoder->get_hw_state(encoder, &pipe))
4831 		return NULL;
4832 
4833 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
4834 
4835 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
4836 	if (!mode)
4837 		return NULL;
4838 
4839 	crtc_state = intel_crtc_state_alloc(crtc);
4840 	if (!crtc_state) {
4841 		kfree(mode);
4842 		return NULL;
4843 	}
4844 
4845 	if (!intel_crtc_get_pipe_config(crtc_state)) {
4846 		kfree(crtc_state);
4847 		kfree(mode);
4848 		return NULL;
4849 	}
4850 
4851 	intel_encoder_get_config(encoder, crtc_state);
4852 
4853 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
4854 
4855 	kfree(crtc_state);
4856 
4857 	return mode;
4858 }
4859 
4860 /**
4861  * intel_wm_need_update - Check whether watermarks need updating
4862  * @cur: current plane state
4863  * @new: new plane state
4864  *
4865  * Check current plane state versus the new one to determine whether
4866  * watermarks need to be recalculated.
4867  *
4868  * Returns true or false.
4869  */
4870 static bool intel_wm_need_update(const struct intel_plane_state *cur,
4871 				 struct intel_plane_state *new)
4872 {
4873 	/* Update watermarks on tiling or size changes. */
4874 	if (new->uapi.visible != cur->uapi.visible)
4875 		return true;
4876 
4877 	if (!cur->hw.fb || !new->hw.fb)
4878 		return false;
4879 
4880 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
4881 	    cur->hw.rotation != new->hw.rotation ||
4882 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
4883 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
4884 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
4885 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
4886 		return true;
4887 
4888 	return false;
4889 }
4890 
4891 static bool needs_scaling(const struct intel_plane_state *state)
4892 {
4893 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
4894 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
4895 	int dst_w = drm_rect_width(&state->uapi.dst);
4896 	int dst_h = drm_rect_height(&state->uapi.dst);
4897 
4898 	return (src_w != dst_w || src_h != dst_h);
4899 }
4900 
/*
 * intel_plane_atomic_calc_changes - compute crtc-level flags for a plane update
 * @old_crtc_state: crtc state before this update
 * @new_crtc_state: crtc state after this update (flags are written here)
 * @old_plane_state: plane state before this update
 * @new_plane_state: plane state after this update
 *
 * Derives the watermark/cxsr/frontbuffer bookkeeping the crtc needs based on
 * how this plane's visibility and scaling change. Returns 0 on success, or a
 * negative error code from plane scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *new_plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = new_crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Gen9+ non-cursor planes go through the skl plane scaler. */
	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = new_plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive crtc. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(new_crtc_state, new_plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing to flag. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
			new_crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			new_crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			new_crtc_state->update_wm_pre = true;
			new_crtc_state->update_wm_post = true;
		}
	}

	/* Record that this plane's frontbuffer is touched by the update. */
	if (visible || was_visible)
		new_crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(new_plane_state))))
		new_crtc_state->disable_lp_wm = true;

	return 0;
}
5022 
5023 static bool encoders_cloneable(const struct intel_encoder *a,
5024 			       const struct intel_encoder *b)
5025 {
5026 	/* masks could be asymmetric, so check both ways */
5027 	return a == b || (a->cloneable & (1 << b->type) &&
5028 			  b->cloneable & (1 << a->type));
5029 }
5030 
5031 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
5032 					 struct intel_crtc *crtc,
5033 					 struct intel_encoder *encoder)
5034 {
5035 	struct intel_encoder *source_encoder;
5036 	struct drm_connector *connector;
5037 	struct drm_connector_state *connector_state;
5038 	int i;
5039 
5040 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5041 		if (connector_state->crtc != &crtc->base)
5042 			continue;
5043 
5044 		source_encoder =
5045 			to_intel_encoder(connector_state->best_encoder);
5046 		if (!encoders_cloneable(encoder, source_encoder))
5047 			return false;
5048 	}
5049 
5050 	return true;
5051 }
5052 
5053 static int icl_add_linked_planes(struct intel_atomic_state *state)
5054 {
5055 	struct intel_plane *plane, *linked;
5056 	struct intel_plane_state *plane_state, *linked_plane_state;
5057 	int i;
5058 
5059 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
5060 		linked = plane_state->planar_linked_plane;
5061 
5062 		if (!linked)
5063 			continue;
5064 
5065 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
5066 		if (IS_ERR(linked_plane_state))
5067 			return PTR_ERR(linked_plane_state);
5068 
5069 		drm_WARN_ON(state->base.dev,
5070 			    linked_plane_state->planar_linked_plane != plane);
5071 		drm_WARN_ON(state->base.dev,
5072 			    linked_plane_state->planar_slave == plane_state->planar_slave);
5073 	}
5074 
5075 	return 0;
5076 }
5077 
/*
 * icl_check_nv12_planes - (re)assign Y planes for planar (NV12-style) YUV
 * @crtc_state: new crtc state being checked
 *
 * On gen11+ each plane scanning out planar YUV needs a second ("Y") plane.
 * Tear down all previous links, then pair every plane in nv12_planes with a
 * free Y-capable plane. Returns 0 on success, -EINVAL if no Y plane is free,
 * or an error from adding the linked plane state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			/* A now-unlinked slave must not stay enabled. */
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* Nothing needs planar links on this crtc. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free Y-capable plane on this crtc. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Establish the mutual link and activate the Y plane. */
		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			/* Tell the HDR plane which slave carries its chroma. */
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
5176 
5177 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
5178 {
5179 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
5180 	struct intel_atomic_state *state =
5181 		to_intel_atomic_state(new_crtc_state->uapi.state);
5182 	const struct intel_crtc_state *old_crtc_state =
5183 		intel_atomic_get_old_crtc_state(state, crtc);
5184 
5185 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
5186 }
5187 
5188 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
5189 {
5190 	const struct drm_display_mode *pipe_mode =
5191 		&crtc_state->hw.pipe_mode;
5192 	int linetime_wm;
5193 
5194 	if (!crtc_state->hw.enable)
5195 		return 0;
5196 
5197 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5198 					pipe_mode->crtc_clock);
5199 
5200 	return min(linetime_wm, 0x1ff);
5201 }
5202 
5203 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
5204 			       const struct intel_cdclk_state *cdclk_state)
5205 {
5206 	const struct drm_display_mode *pipe_mode =
5207 		&crtc_state->hw.pipe_mode;
5208 	int linetime_wm;
5209 
5210 	if (!crtc_state->hw.enable)
5211 		return 0;
5212 
5213 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
5214 					cdclk_state->logical.cdclk);
5215 
5216 	return min(linetime_wm, 0x1ff);
5217 }
5218 
5219 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
5220 {
5221 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5222 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5223 	const struct drm_display_mode *pipe_mode =
5224 		&crtc_state->hw.pipe_mode;
5225 	int linetime_wm;
5226 
5227 	if (!crtc_state->hw.enable)
5228 		return 0;
5229 
5230 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
5231 				   crtc_state->pixel_rate);
5232 
5233 	/* Display WA #1135: BXT:ALL GLK:ALL */
5234 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
5235 	    dev_priv->ipc_enabled)
5236 		linetime_wm /= 2;
5237 
5238 	return min(linetime_wm, 0x1ff);
5239 }
5240 
5241 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
5242 				   struct intel_crtc *crtc)
5243 {
5244 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5245 	struct intel_crtc_state *crtc_state =
5246 		intel_atomic_get_new_crtc_state(state, crtc);
5247 	const struct intel_cdclk_state *cdclk_state;
5248 
5249 	if (DISPLAY_VER(dev_priv) >= 9)
5250 		crtc_state->linetime = skl_linetime_wm(crtc_state);
5251 	else
5252 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
5253 
5254 	if (!hsw_crtc_supports_ips(crtc))
5255 		return 0;
5256 
5257 	cdclk_state = intel_atomic_get_cdclk_state(state);
5258 	if (IS_ERR(cdclk_state))
5259 		return PTR_ERR(cdclk_state);
5260 
5261 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
5262 						       cdclk_state);
5263 
5264 	return 0;
5265 }
5266 
/*
 * intel_crtc_atomic_check - validate and finalize a crtc's new atomic state
 * @state: the full atomic state
 * @crtc: the crtc to check
 *
 * Runs clock computation, color management, watermark, scaler, IPS, linetime
 * and PSR2 selective-fetch checks in order. Returns 0 on success or the first
 * negative error code encountered.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/* Pre-ilk (minus g4x): turning the pipe off needs a post-update wm pass. */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* Recompute the clock on modeset; no shared DPLL may be assigned yet. */
	if (mode_changed && crtc_state->hw.enable &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state.  We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	/* Gen9+ pipe/plane scaler assignment. */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	/* Linetime watermarks exist on HSW/BDW and gen9+. */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}
5352 
/*
 * Synchronize every connector's atomic state with the routing currently
 * expressed by connector->encoder / encoder->crtc (e.g. after hw readout),
 * keeping the per-binding connector reference count balanced.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/*
		 * NOTE(review): presumably drops the reference taken the
		 * last time this connector was bound to a crtc (see the
		 * drm_connector_get() below) — confirm against the atomic
		 * core's refcounting rules.
		 */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

		if (encoder) {
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* Derive max_bpc from pipe bpp, defaulting to 24 (8 bpc). */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			drm_connector_get(&connector->base);
		} else {
			/* Connector is not driven by anything right now. */
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
5385 
5386 static int
5387 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
5388 		      struct intel_crtc_state *pipe_config)
5389 {
5390 	struct drm_connector *connector = conn_state->connector;
5391 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
5392 	const struct drm_display_info *info = &connector->display_info;
5393 	int bpp;
5394 
5395 	switch (conn_state->max_bpc) {
5396 	case 6 ... 7:
5397 		bpp = 6 * 3;
5398 		break;
5399 	case 8 ... 9:
5400 		bpp = 8 * 3;
5401 		break;
5402 	case 10 ... 11:
5403 		bpp = 10 * 3;
5404 		break;
5405 	case 12 ... 16:
5406 		bpp = 12 * 3;
5407 		break;
5408 	default:
5409 		MISSING_CASE(conn_state->max_bpc);
5410 		return -EINVAL;
5411 	}
5412 
5413 	if (bpp < pipe_config->pipe_bpp) {
5414 		drm_dbg_kms(&i915->drm,
5415 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
5416 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
5417 			    connector->base.id, connector->name,
5418 			    bpp, 3 * info->bpc,
5419 			    3 * conn_state->max_requested_bpc,
5420 			    pipe_config->pipe_bpp);
5421 
5422 		pipe_config->pipe_bpp = bpp;
5423 	}
5424 
5425 	return 0;
5426 }
5427 
5428 static int
5429 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5430 			  struct intel_crtc_state *pipe_config)
5431 {
5432 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5433 	struct drm_atomic_state *state = pipe_config->uapi.state;
5434 	struct drm_connector *connector;
5435 	struct drm_connector_state *connector_state;
5436 	int bpp, i;
5437 
5438 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
5439 	    IS_CHERRYVIEW(dev_priv)))
5440 		bpp = 10*3;
5441 	else if (DISPLAY_VER(dev_priv) >= 5)
5442 		bpp = 12*3;
5443 	else
5444 		bpp = 8*3;
5445 
5446 	pipe_config->pipe_bpp = bpp;
5447 
5448 	/* Clamp display bpp to connector max bpp */
5449 	for_each_new_connector_in_state(state, connector, connector_state, i) {
5450 		int ret;
5451 
5452 		if (connector_state->crtc != &crtc->base)
5453 			continue;
5454 
5455 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
5456 		if (ret)
5457 			return ret;
5458 	}
5459 
5460 	return 0;
5461 }
5462 
/* Dump the crtc_* (hardware) timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
5475 
/* Dump one set of link M/N values (tagged with @id) to the KMS debug log. */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
5489 
/* Dump an HDMI infoframe to the kernel log, only if KMS debugging is on. */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	/* Skip the (relatively expensive) log call when debug is disabled. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
5499 
/* Dump a DP VSC SDP to the kernel log, only if KMS debugging is on. */
static void
intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
		      const struct drm_dp_vsc_sdp *vsc)
{
	/* Skip the (relatively expensive) log call when debug is disabled. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
}
5509 
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for INTEL_OUTPUT_* values, indexed by enum value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
5528 
/*
 * snprintf_output_types - format a bitmask of output types as a CSV string
 * @buf: destination buffer
 * @len: size of @buf
 * @output_types: bitmask of BIT(INTEL_OUTPUT_*) values
 *
 * Bits without a name in output_type_str[] trigger a one-time warning.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* Prepend a comma for every entry after the first. */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* snprintf returning >= len means the output was truncated. */
		if (r >= len)
			break;
		str += r;
		len -= r;

		/* Clear handled bits so leftovers can be detected below. */
		output_types &= ~BIT(i);
	}

	/* Any bit still set has no entry in output_type_str[]. */
	WARN_ON_ONCE(output_types != 0);
}
5555 
/* Human-readable names for INTEL_OUTPUT_FORMAT_* values. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
5561 
5562 static const char *output_formats(enum intel_output_format format)
5563 {
5564 	if (format >= ARRAY_SIZE(output_format_str))
5565 		return "invalid";
5566 	return output_format_str[format];
5567 }
5568 
/* Dump a plane's fb, rotation/scaler, and src/dst rects to the KMS log. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/* No framebuffer: log a short placeholder line and stop. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src/dst rects are only meaningful while the plane is visible. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
5596 
/*
 * Dump a CRTC's full pipe configuration to the KMS debug log: output
 * types/format, transcoders, bigjoiner/splitter/MST roles, link M/N values,
 * infoframes, VRR, modes/timings, scalers, panel fitter, DPLL and color
 * management state. Finally dumps the state of every plane on this pipe
 * from @state (if a state is provided). @context is a free-form tag that
 * identifies the caller in the log. Debug aid only; nothing is modified.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled pipe has no config worth dumping, only its planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
		    pipe_config->bigjoiner_slave ? "slave" :
		    pipe_config->bigjoiner ? "master" : "no");

	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
		    enableddisabled(pipe_config->splitter.enable),
		    pipe_config->splitter.link_count,
		    pipe_config->splitter.pixel_overlap);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second set of link values, used for DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* Dump each infoframe only when its enable bit is actually set. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): GAMUT_METADATA dumps infoframes.drm, the same buffer
	 * as the DRM infoframe above — presumably the two packet types share
	 * that storage; confirm against the infoframes struct definition.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
		    yesno(pipe_config->vrr.enable),
		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
		    pipe_config->vrr.flipline,
		    intel_vrr_vmin_vblank_start(pipe_config),
		    intel_vrr_vmax_vblank_start(pipe_config));

	/* Requested vs adjusted vs pipe mode, with full CRTC timings. */
	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (DISPLAY_VER(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the legacy panel fitter; others the PCH pfit. */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has cgm_mode instead of csc_mode. */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
		    pipe_config->hw.degamma_lut ?
		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
		    pipe_config->hw.gamma_lut ?
		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);

dump_planes:
	/* Without an atomic state there are no plane states to dump. */
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
5762 
5763 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
5764 {
5765 	struct drm_device *dev = state->base.dev;
5766 	struct drm_connector *connector;
5767 	struct drm_connector_list_iter conn_iter;
5768 	unsigned int used_ports = 0;
5769 	unsigned int used_mst_ports = 0;
5770 	bool ret = true;
5771 
5772 	/*
5773 	 * We're going to peek into connector->state,
5774 	 * hence connection_mutex must be held.
5775 	 */
5776 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
5777 
5778 	/*
5779 	 * Walk the connector list instead of the encoder
5780 	 * list to detect the problem on ddi platforms
5781 	 * where there's just one encoder per digital port.
5782 	 */
5783 	drm_connector_list_iter_begin(dev, &conn_iter);
5784 	drm_for_each_connector_iter(connector, &conn_iter) {
5785 		struct drm_connector_state *connector_state;
5786 		struct intel_encoder *encoder;
5787 
5788 		connector_state =
5789 			drm_atomic_get_new_connector_state(&state->base,
5790 							   connector);
5791 		if (!connector_state)
5792 			connector_state = connector->state;
5793 
5794 		if (!connector_state->best_encoder)
5795 			continue;
5796 
5797 		encoder = to_intel_encoder(connector_state->best_encoder);
5798 
5799 		drm_WARN_ON(dev, !connector_state->crtc);
5800 
5801 		switch (encoder->type) {
5802 		case INTEL_OUTPUT_DDI:
5803 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
5804 				break;
5805 			fallthrough;
5806 		case INTEL_OUTPUT_DP:
5807 		case INTEL_OUTPUT_HDMI:
5808 		case INTEL_OUTPUT_EDP:
5809 			/* the same port mustn't appear more than once */
5810 			if (used_ports & BIT(encoder->port))
5811 				ret = false;
5812 
5813 			used_ports |= BIT(encoder->port);
5814 			break;
5815 		case INTEL_OUTPUT_DP_MST:
5816 			used_mst_ports |=
5817 				1 << encoder->port;
5818 			break;
5819 		default:
5820 			break;
5821 		}
5822 	}
5823 	drm_connector_list_iter_end(&conn_iter);
5824 
5825 	/* can't mix MST and SST/HDMI on the same port */
5826 	if (used_ports & used_mst_ports)
5827 		return false;
5828 
5829 	return ret;
5830 }
5831 
/*
 * Refresh the parts of the hw state that may change without a full
 * modeset: copy the color-management blobs from the master CRTC's new
 * state, when that master is part of this atomic update.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
					   struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);

	/* Master not in this update: its state is unchanged, nothing to copy. */
	if (!master_crtc_state)
		return;

	intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
}
5846 
5847 static void
5848 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
5849 				 struct intel_crtc_state *crtc_state)
5850 {
5851 	crtc_state->hw.enable = crtc_state->uapi.enable;
5852 	crtc_state->hw.active = crtc_state->uapi.active;
5853 	crtc_state->hw.mode = crtc_state->uapi.mode;
5854 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
5855 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
5856 
5857 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
5858 }
5859 
/*
 * Mirror the hw (driver-internal) CRTC state back into the uapi
 * (userspace-visible) state. Bigjoiner slaves are skipped: their uapi
 * state is owned by the master CRTC.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return;

	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/*
	 * Setting the mode duplicates it into a blob; only expected to fail
	 * on allocation failure, hence the WARN rather than error handling.
	 */
	drm_WARN_ON(crtc_state->uapi.crtc->dev,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
5881 
/*
 * Turn @crtc_state into a bigjoiner slave copy of @from_crtc_state (the
 * master's state): bulk-copy the master state while preserving this
 * CRTC's own uapi state, scaler/DPLL assignments and CRC flag, then
 * re-seed the hw state and mark the CRTC as a bigjoiner slave.
 *
 * Returns 0 on success, -ENOMEM if the temporary copy cannot be allocated.
 */
static int
copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
			  const struct intel_crtc_state *from_crtc_state)
{
	struct intel_crtc_state *saved_state;

	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* Preserve this CRTC's own bits on top of the master copy. */
	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;

	/* Drop blob references before they are overwritten by the memcpy. */
	intel_crtc_free_hw_state(crtc_state);
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
	crtc_state->hw.enable = from_crtc_state->hw.enable;
	crtc_state->hw.active = from_crtc_state->hw.active;
	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;

	/* Some fixups */
	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
	crtc_state->bigjoiner_slave = true;
	crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
	crtc_state->has_audio = from_crtc_state->has_audio;

	return 0;
}
5921 
/*
 * Reset @crtc_state to a freshly-allocated state before recomputing it,
 * while carrying over the fields that must survive (uapi state, scaler
 * and DPLL assignments, CRC flag, and watermarks on pre-ilk platforms).
 * Finally re-seed the hw state from the uapi state.
 *
 * Returns 0 on success, -ENOMEM if the scratch state cannot be allocated.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* These platforms compute watermarks outside the normal flow. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);

	return 0;
}
5960 
/*
 * Compute the pipe configuration for a modeset: sanitize sync flags,
 * establish the baseline bpp, collect output types, run each encoder's
 * .compute_config() hook, then the CRTC config computation — retrying
 * once from the encoder stage if the CRTC reports -EAGAIN (bandwidth
 * constrained).
 *
 * Returns 0 on success, -EDEADLK to back off and retry locking,
 * or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* Default transcoder assignment: one per pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the baseline bpp before encoders may lower pipe_bpp. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		/* -EDEADLK needs to bubble up without being logged as failure. */
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	/* -EAGAIN: CRTC wants another pass; allow exactly one retry. */
	if (ret == -EAGAIN) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
		return ret;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
6097 
6098 static int
6099 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
6100 {
6101 	struct intel_atomic_state *state =
6102 		to_intel_atomic_state(crtc_state->uapi.state);
6103 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6104 	struct drm_connector_state *conn_state;
6105 	struct drm_connector *connector;
6106 	int i;
6107 
6108 	for_each_new_connector_in_state(&state->base, connector,
6109 					conn_state, i) {
6110 		struct intel_encoder *encoder =
6111 			to_intel_encoder(conn_state->best_encoder);
6112 		int ret;
6113 
6114 		if (conn_state->crtc != &crtc->base ||
6115 		    !encoder->compute_config_late)
6116 			continue;
6117 
6118 		ret = encoder->compute_config_late(encoder, crtc_state,
6119 						   conn_state);
6120 		if (ret)
6121 			return ret;
6122 	}
6123 
6124 	return 0;
6125 }
6126 
/*
 * Compare two clock values with a small tolerance. Identical clocks
 * always match; a zero clock never matches a non-zero one. Otherwise
 * the clocks match when their difference is below 5% of their sum.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, diff;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	diff = abs(clock1 - clock2);

	/*
	 * (diff + sum) * 100 / sum < 105  is equivalent (in integer
	 * arithmetic) to  diff * 100 / sum < 5, i.e. diff < 5% of sum.
	 */
	return (diff + sum) * 100 / sum < 105;
}
6144 
/*
 * Compare two M/N link ratios. In @exact mode only bit-identical values
 * match. Otherwise the pair with the smaller N is scaled up by powers of
 * two until the Ns meet; if they land on the same value (i.e. the Ns
 * differ by a power of two) the Ms are compared fuzzily.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	/* Zero components only match exactly (handled above). */
	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* Guarantees the shifted values still fit in int for the fuzzy check. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	/* Scale the smaller-N pair up until the Ns are comparable. */
	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	/* Ns must now be equal, i.e. they differed by a power of two. */
	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
6175 
6176 static bool
6177 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
6178 		       const struct intel_link_m_n *m2_n2,
6179 		       bool exact)
6180 {
6181 	return m_n->tu == m2_n2->tu &&
6182 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
6183 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
6184 		intel_compare_m_n(m_n->link_m, m_n->link_n,
6185 				  m2_n2->link_m, m2_n2->link_n, exact);
6186 }
6187 
6188 static bool
6189 intel_compare_infoframe(const union hdmi_infoframe *a,
6190 			const union hdmi_infoframe *b)
6191 {
6192 	return memcmp(a, b, sizeof(*a)) == 0;
6193 }
6194 
6195 static bool
6196 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
6197 			 const struct drm_dp_vsc_sdp *b)
6198 {
6199 	return memcmp(a, b, sizeof(*a)) == 0;
6200 }
6201 
6202 static void
6203 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
6204 			       bool fastset, const char *name,
6205 			       const union hdmi_infoframe *a,
6206 			       const union hdmi_infoframe *b)
6207 {
6208 	if (fastset) {
6209 		if (!drm_debug_enabled(DRM_UT_KMS))
6210 			return;
6211 
6212 		drm_dbg_kms(&dev_priv->drm,
6213 			    "fastset mismatch in %s infoframe\n", name);
6214 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
6215 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
6216 		drm_dbg_kms(&dev_priv->drm, "found:\n");
6217 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
6218 	} else {
6219 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
6220 		drm_err(&dev_priv->drm, "expected:\n");
6221 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
6222 		drm_err(&dev_priv->drm, "found:\n");
6223 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
6224 	}
6225 }
6226 
6227 static void
6228 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
6229 				bool fastset, const char *name,
6230 				const struct drm_dp_vsc_sdp *a,
6231 				const struct drm_dp_vsc_sdp *b)
6232 {
6233 	if (fastset) {
6234 		if (!drm_debug_enabled(DRM_UT_KMS))
6235 			return;
6236 
6237 		drm_dbg_kms(&dev_priv->drm,
6238 			    "fastset mismatch in %s dp sdp\n", name);
6239 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
6240 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
6241 		drm_dbg_kms(&dev_priv->drm, "found:\n");
6242 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
6243 	} else {
6244 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
6245 		drm_err(&dev_priv->drm, "expected:\n");
6246 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
6247 		drm_err(&dev_priv->drm, "found:\n");
6248 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
6249 	}
6250 }
6251 
/*
 * Log a pipe-config mismatch for field @name with a printf-style detail
 * message. Fastset mismatches go to the KMS debug log; real mismatches
 * are driver errors. The __printf(4, 5) attribute lets the compiler
 * type-check the format arguments.
 */
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
		     const char *name, const char *format, ...)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct va_format vaf;
	va_list args;

	/* %pV expands the caller's format/args inside our own format. */
	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (fastset)
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
			    crtc->base.base.id, crtc->base.name, name, &vaf);
	else
		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
			crtc->base.base.id, crtc->base.name, name, &vaf);

	va_end(args);
}
6274 
6275 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
6276 {
6277 	if (dev_priv->params.fastboot != -1)
6278 		return dev_priv->params.fastboot;
6279 
6280 	/* Enable fastboot by default on Skylake and newer */
6281 	if (DISPLAY_VER(dev_priv) >= 9)
6282 		return true;
6283 
6284 	/* Enable fastboot by default on VLV and CHV */
6285 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6286 		return true;
6287 
6288 	/* Disabled by default on all others */
6289 	return false;
6290 }
6291 
6292 static bool
6293 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
6294 			  const struct intel_crtc_state *pipe_config,
6295 			  bool fastset)
6296 {
6297 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
6298 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
6299 	bool ret = true;
6300 	u32 bp_gamma = 0;
6301 	bool fixup_inherited = fastset &&
6302 		current_config->inherited && !pipe_config->inherited;
6303 
6304 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
6305 		drm_dbg_kms(&dev_priv->drm,
6306 			    "initial modeset and fastboot not set\n");
6307 		ret = false;
6308 	}
6309 
6310 #define PIPE_CONF_CHECK_X(name) do { \
6311 	if (current_config->name != pipe_config->name) { \
6312 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6313 				     "(expected 0x%08x, found 0x%08x)", \
6314 				     current_config->name, \
6315 				     pipe_config->name); \
6316 		ret = false; \
6317 	} \
6318 } while (0)
6319 
6320 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
6321 	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
6322 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6323 				     "(expected 0x%08x, found 0x%08x)", \
6324 				     current_config->name & (mask), \
6325 				     pipe_config->name & (mask)); \
6326 		ret = false; \
6327 	} \
6328 } while (0)
6329 
6330 #define PIPE_CONF_CHECK_I(name) do { \
6331 	if (current_config->name != pipe_config->name) { \
6332 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6333 				     "(expected %i, found %i)", \
6334 				     current_config->name, \
6335 				     pipe_config->name); \
6336 		ret = false; \
6337 	} \
6338 } while (0)
6339 
6340 #define PIPE_CONF_CHECK_BOOL(name) do { \
6341 	if (current_config->name != pipe_config->name) { \
6342 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
6343 				     "(expected %s, found %s)", \
6344 				     yesno(current_config->name), \
6345 				     yesno(pipe_config->name)); \
6346 		ret = false; \
6347 	} \
6348 } while (0)
6349 
6350 /*
6351  * Checks state where we only read out the enabling, but not the entire
6352  * state itself (like full infoframes or ELD for audio). These states
6353  * require a full modeset on bootup to fix up.
6354  */
6355 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
6356 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
6357 		PIPE_CONF_CHECK_BOOL(name); \
6358 	} else { \
6359 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6360 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
6361 				     yesno(current_config->name), \
6362 				     yesno(pipe_config->name)); \
6363 		ret = false; \
6364 	} \
6365 } while (0)
6366 
6367 #define PIPE_CONF_CHECK_P(name) do { \
6368 	if (current_config->name != pipe_config->name) { \
6369 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6370 				     "(expected %p, found %p)", \
6371 				     current_config->name, \
6372 				     pipe_config->name); \
6373 		ret = false; \
6374 	} \
6375 } while (0)
6376 
6377 #define PIPE_CONF_CHECK_M_N(name) do { \
6378 	if (!intel_compare_link_m_n(&current_config->name, \
6379 				    &pipe_config->name,\
6380 				    !fastset)) { \
6381 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6382 				     "(expected tu %i gmch %i/%i link %i/%i, " \
6383 				     "found tu %i, gmch %i/%i link %i/%i)", \
6384 				     current_config->name.tu, \
6385 				     current_config->name.gmch_m, \
6386 				     current_config->name.gmch_n, \
6387 				     current_config->name.link_m, \
6388 				     current_config->name.link_n, \
6389 				     pipe_config->name.tu, \
6390 				     pipe_config->name.gmch_m, \
6391 				     pipe_config->name.gmch_n, \
6392 				     pipe_config->name.link_m, \
6393 				     pipe_config->name.link_n); \
6394 		ret = false; \
6395 	} \
6396 } while (0)
6397 
6398 /* This is required for BDW+ where there is only one set of registers for
6399  * switching between high and low RR.
6400  * This macro can be used whenever a comparison has to be made between one
6401  * hw state and multiple sw state variables.
6402  */
6403 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
6404 	if (!intel_compare_link_m_n(&current_config->name, \
6405 				    &pipe_config->name, !fastset) && \
6406 	    !intel_compare_link_m_n(&current_config->alt_name, \
6407 				    &pipe_config->name, !fastset)) { \
6408 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6409 				     "(expected tu %i gmch %i/%i link %i/%i, " \
6410 				     "or tu %i gmch %i/%i link %i/%i, " \
6411 				     "found tu %i, gmch %i/%i link %i/%i)", \
6412 				     current_config->name.tu, \
6413 				     current_config->name.gmch_m, \
6414 				     current_config->name.gmch_n, \
6415 				     current_config->name.link_m, \
6416 				     current_config->name.link_n, \
6417 				     current_config->alt_name.tu, \
6418 				     current_config->alt_name.gmch_m, \
6419 				     current_config->alt_name.gmch_n, \
6420 				     current_config->alt_name.link_m, \
6421 				     current_config->alt_name.link_n, \
6422 				     pipe_config->name.tu, \
6423 				     pipe_config->name.gmch_m, \
6424 				     pipe_config->name.gmch_n, \
6425 				     pipe_config->name.link_m, \
6426 				     pipe_config->name.link_n); \
6427 		ret = false; \
6428 	} \
6429 } while (0)
6430 
6431 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
6432 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
6433 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6434 				     "(%x) (expected %i, found %i)", \
6435 				     (mask), \
6436 				     current_config->name & (mask), \
6437 				     pipe_config->name & (mask)); \
6438 		ret = false; \
6439 	} \
6440 } while (0)
6441 
6442 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
6443 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
6444 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
6445 				     "(expected %i, found %i)", \
6446 				     current_config->name, \
6447 				     pipe_config->name); \
6448 		ret = false; \
6449 	} \
6450 } while (0)
6451 
6452 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
6453 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
6454 				     &pipe_config->infoframes.name)) { \
6455 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
6456 					       &current_config->infoframes.name, \
6457 					       &pipe_config->infoframes.name); \
6458 		ret = false; \
6459 	} \
6460 } while (0)
6461 
/*
 * Compare the sw and hw DP VSC SDP @name. The comparison is skipped
 * whenever either state has PSR enabled — presumably because PSR owns
 * the VSC SDP in that case; NOTE(review): confirm against the PSR code.
 */
#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
	if (!current_config->has_psr && !pipe_config->has_psr && \
	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
				      &pipe_config->infoframes.name)) { \
		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
						&current_config->infoframes.name, \
						&pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)
6472 
/*
 * Two-stage color LUT compare: the mode register @name1 must match first;
 * only then are the @name2 LUT blob contents compared, with @bit_precision
 * significant bits. A @name1 mismatch skips the LUT value comparison.
 */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)
6490 
/* True if either the sw or the hw state carries @quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))
6493 
6494 	PIPE_CONF_CHECK_I(cpu_transcoder);
6495 
6496 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
6497 	PIPE_CONF_CHECK_I(fdi_lanes);
6498 	PIPE_CONF_CHECK_M_N(fdi_m_n);
6499 
6500 	PIPE_CONF_CHECK_I(lane_count);
6501 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
6502 
6503 	if (DISPLAY_VER(dev_priv) < 8) {
6504 		PIPE_CONF_CHECK_M_N(dp_m_n);
6505 
6506 		if (current_config->has_drrs)
6507 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
6508 	} else
6509 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
6510 
6511 	PIPE_CONF_CHECK_X(output_types);
6512 
6513 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
6514 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
6515 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
6516 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
6517 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
6518 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
6519 
6520 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
6521 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
6522 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
6523 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
6524 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
6525 	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
6526 
6527 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
6528 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
6529 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
6530 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
6531 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
6532 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
6533 
6534 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
6535 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
6536 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
6537 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
6538 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
6539 	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
6540 
6541 	PIPE_CONF_CHECK_I(pixel_multiplier);
6542 
6543 	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6544 			      DRM_MODE_FLAG_INTERLACE);
6545 
6546 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
6547 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6548 				      DRM_MODE_FLAG_PHSYNC);
6549 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6550 				      DRM_MODE_FLAG_NHSYNC);
6551 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6552 				      DRM_MODE_FLAG_PVSYNC);
6553 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
6554 				      DRM_MODE_FLAG_NVSYNC);
6555 	}
6556 
6557 	PIPE_CONF_CHECK_I(output_format);
6558 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
6559 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
6560 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6561 		PIPE_CONF_CHECK_BOOL(limited_color_range);
6562 
6563 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
6564 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
6565 	PIPE_CONF_CHECK_BOOL(has_infoframe);
6566 	PIPE_CONF_CHECK_BOOL(fec_enable);
6567 
6568 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
6569 
6570 	PIPE_CONF_CHECK_X(gmch_pfit.control);
6571 	/* pfit ratios are autocomputed by the hw on gen4+ */
6572 	if (DISPLAY_VER(dev_priv) < 4)
6573 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
6574 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
6575 
6576 	/*
6577 	 * Changing the EDP transcoder input mux
6578 	 * (A_ONOFF vs. A_ON) requires a full modeset.
6579 	 */
6580 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
6581 
6582 	if (!fastset) {
6583 		PIPE_CONF_CHECK_I(pipe_src_w);
6584 		PIPE_CONF_CHECK_I(pipe_src_h);
6585 
6586 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
6587 		if (current_config->pch_pfit.enabled) {
6588 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
6589 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
6590 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
6591 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
6592 		}
6593 
6594 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
6595 		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
6596 
6597 		PIPE_CONF_CHECK_X(gamma_mode);
6598 		if (IS_CHERRYVIEW(dev_priv))
6599 			PIPE_CONF_CHECK_X(cgm_mode);
6600 		else
6601 			PIPE_CONF_CHECK_X(csc_mode);
6602 		PIPE_CONF_CHECK_BOOL(gamma_enable);
6603 		PIPE_CONF_CHECK_BOOL(csc_enable);
6604 
6605 		PIPE_CONF_CHECK_I(linetime);
6606 		PIPE_CONF_CHECK_I(ips_linetime);
6607 
6608 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
6609 		if (bp_gamma)
6610 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
6611 
6612 		if (current_config->active_planes) {
6613 			PIPE_CONF_CHECK_BOOL(has_psr);
6614 			PIPE_CONF_CHECK_BOOL(has_psr2);
6615 			PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
6616 			PIPE_CONF_CHECK_I(dc3co_exitline);
6617 		}
6618 	}
6619 
6620 	PIPE_CONF_CHECK_BOOL(double_wide);
6621 
6622 	if (dev_priv->dpll.mgr) {
6623 		PIPE_CONF_CHECK_P(shared_dpll);
6624 
6625 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
6626 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
6627 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
6628 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
6629 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
6630 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
6631 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
6632 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
6633 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
6634 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
6635 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
6636 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
6637 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
6638 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
6639 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
6640 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
6641 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
6642 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
6643 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
6644 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
6645 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
6646 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
6647 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
6648 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
6649 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
6650 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
6651 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
6652 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
6653 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
6654 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
6655 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
6656 	}
6657 
6658 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
6659 	PIPE_CONF_CHECK_X(dsi_pll.div);
6660 
6661 	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
6662 		PIPE_CONF_CHECK_I(pipe_bpp);
6663 
6664 	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
6665 	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
6666 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
6667 
6668 	PIPE_CONF_CHECK_I(min_voltage_level);
6669 
6670 	if (current_config->has_psr || pipe_config->has_psr)
6671 		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
6672 					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
6673 	else
6674 		PIPE_CONF_CHECK_X(infoframes.enable);
6675 
6676 	PIPE_CONF_CHECK_X(infoframes.gcp);
6677 	PIPE_CONF_CHECK_INFOFRAME(avi);
6678 	PIPE_CONF_CHECK_INFOFRAME(spd);
6679 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
6680 	PIPE_CONF_CHECK_INFOFRAME(drm);
6681 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
6682 
6683 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
6684 	PIPE_CONF_CHECK_I(master_transcoder);
6685 	PIPE_CONF_CHECK_BOOL(bigjoiner);
6686 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
6687 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
6688 
6689 	PIPE_CONF_CHECK_I(dsc.compression_enable);
6690 	PIPE_CONF_CHECK_I(dsc.dsc_split);
6691 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
6692 
6693 	PIPE_CONF_CHECK_BOOL(splitter.enable);
6694 	PIPE_CONF_CHECK_I(splitter.link_count);
6695 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
6696 
6697 	PIPE_CONF_CHECK_I(mst_master_transcoder);
6698 
6699 	PIPE_CONF_CHECK_BOOL(vrr.enable);
6700 	PIPE_CONF_CHECK_I(vrr.vmin);
6701 	PIPE_CONF_CHECK_I(vrr.vmax);
6702 	PIPE_CONF_CHECK_I(vrr.flipline);
6703 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
6704 	PIPE_CONF_CHECK_I(vrr.guardband);
6705 
6706 #undef PIPE_CONF_CHECK_X
6707 #undef PIPE_CONF_CHECK_I
6708 #undef PIPE_CONF_CHECK_BOOL
6709 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
6710 #undef PIPE_CONF_CHECK_P
6711 #undef PIPE_CONF_CHECK_FLAGS
6712 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
6713 #undef PIPE_CONF_CHECK_COLOR_LUT
6714 #undef PIPE_CONF_QUIRK
6715 
6716 	return ret;
6717 }
6718 
6719 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
6720 					   const struct intel_crtc_state *pipe_config)
6721 {
6722 	if (pipe_config->has_pch_encoder) {
6723 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6724 							    &pipe_config->fdi_m_n);
6725 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
6726 
6727 		/*
6728 		 * FDI already provided one idea for the dotclock.
6729 		 * Yell if the encoder disagrees.
6730 		 */
6731 		drm_WARN(&dev_priv->drm,
6732 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
6733 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
6734 			 fdi_dotclock, dotclock);
6735 	}
6736 }
6737 
6738 static void verify_wm_state(struct intel_crtc *crtc,
6739 			    struct intel_crtc_state *new_crtc_state)
6740 {
6741 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6742 	struct skl_hw_state {
6743 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
6744 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
6745 		struct skl_pipe_wm wm;
6746 	} *hw;
6747 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
6748 	int level, max_level = ilk_wm_max_level(dev_priv);
6749 	struct intel_plane *plane;
6750 	u8 hw_enabled_slices;
6751 
6752 	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
6753 		return;
6754 
6755 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
6756 	if (!hw)
6757 		return;
6758 
6759 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
6760 
6761 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
6762 
6763 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
6764 
6765 	if (DISPLAY_VER(dev_priv) >= 11 &&
6766 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
6767 		drm_err(&dev_priv->drm,
6768 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
6769 			dev_priv->dbuf.enabled_slices,
6770 			hw_enabled_slices);
6771 
6772 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
6773 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
6774 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
6775 
6776 		/* Watermarks */
6777 		for (level = 0; level <= max_level; level++) {
6778 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
6779 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
6780 
6781 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
6782 				continue;
6783 
6784 			drm_err(&dev_priv->drm,
6785 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6786 				plane->base.base.id, plane->base.name, level,
6787 				sw_wm_level->enable,
6788 				sw_wm_level->blocks,
6789 				sw_wm_level->lines,
6790 				hw_wm_level->enable,
6791 				hw_wm_level->blocks,
6792 				hw_wm_level->lines);
6793 		}
6794 
6795 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
6796 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
6797 
6798 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6799 			drm_err(&dev_priv->drm,
6800 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6801 				plane->base.base.id, plane->base.name,
6802 				sw_wm_level->enable,
6803 				sw_wm_level->blocks,
6804 				sw_wm_level->lines,
6805 				hw_wm_level->enable,
6806 				hw_wm_level->blocks,
6807 				hw_wm_level->lines);
6808 		}
6809 
6810 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
6811 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
6812 
6813 		if (HAS_HW_SAGV_WM(dev_priv) &&
6814 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6815 			drm_err(&dev_priv->drm,
6816 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6817 				plane->base.base.id, plane->base.name,
6818 				sw_wm_level->enable,
6819 				sw_wm_level->blocks,
6820 				sw_wm_level->lines,
6821 				hw_wm_level->enable,
6822 				hw_wm_level->blocks,
6823 				hw_wm_level->lines);
6824 		}
6825 
6826 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
6827 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
6828 
6829 		if (HAS_HW_SAGV_WM(dev_priv) &&
6830 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
6831 			drm_err(&dev_priv->drm,
6832 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
6833 				plane->base.base.id, plane->base.name,
6834 				sw_wm_level->enable,
6835 				sw_wm_level->blocks,
6836 				sw_wm_level->lines,
6837 				hw_wm_level->enable,
6838 				hw_wm_level->blocks,
6839 				hw_wm_level->lines);
6840 		}
6841 
6842 		/* DDB */
6843 		hw_ddb_entry = &hw->ddb_y[plane->id];
6844 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
6845 
6846 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
6847 			drm_err(&dev_priv->drm,
6848 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
6849 				plane->base.base.id, plane->base.name,
6850 				sw_ddb_entry->start, sw_ddb_entry->end,
6851 				hw_ddb_entry->start, hw_ddb_entry->end);
6852 		}
6853 	}
6854 
6855 	kfree(hw);
6856 }
6857 
6858 static void
6859 verify_connector_state(struct intel_atomic_state *state,
6860 		       struct intel_crtc *crtc)
6861 {
6862 	struct drm_connector *connector;
6863 	struct drm_connector_state *new_conn_state;
6864 	int i;
6865 
6866 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
6867 		struct drm_encoder *encoder = connector->encoder;
6868 		struct intel_crtc_state *crtc_state = NULL;
6869 
6870 		if (new_conn_state->crtc != &crtc->base)
6871 			continue;
6872 
6873 		if (crtc)
6874 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6875 
6876 		intel_connector_verify_state(crtc_state, new_conn_state);
6877 
6878 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
6879 		     "connector's atomic encoder doesn't match legacy encoder\n");
6880 	}
6881 }
6882 
6883 static void
6884 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
6885 {
6886 	struct intel_encoder *encoder;
6887 	struct drm_connector *connector;
6888 	struct drm_connector_state *old_conn_state, *new_conn_state;
6889 	int i;
6890 
6891 	for_each_intel_encoder(&dev_priv->drm, encoder) {
6892 		bool enabled = false, found = false;
6893 		enum pipe pipe;
6894 
6895 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
6896 			    encoder->base.base.id,
6897 			    encoder->base.name);
6898 
6899 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
6900 						   new_conn_state, i) {
6901 			if (old_conn_state->best_encoder == &encoder->base)
6902 				found = true;
6903 
6904 			if (new_conn_state->best_encoder != &encoder->base)
6905 				continue;
6906 			found = enabled = true;
6907 
6908 			I915_STATE_WARN(new_conn_state->crtc !=
6909 					encoder->base.crtc,
6910 			     "connector's crtc doesn't match encoder crtc\n");
6911 		}
6912 
6913 		if (!found)
6914 			continue;
6915 
6916 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
6917 		     "encoder's enabled state mismatch "
6918 		     "(expected %i, found %i)\n",
6919 		     !!encoder->base.crtc, enabled);
6920 
6921 		if (!encoder->base.crtc) {
6922 			bool active;
6923 
6924 			active = encoder->get_hw_state(encoder, &pipe);
6925 			I915_STATE_WARN(active,
6926 			     "encoder detached but still enabled on pipe %c.\n",
6927 			     pipe_name(pipe));
6928 		}
6929 	}
6930 }
6931 
/*
 * Read the full crtc state back from the hardware and compare it against
 * the sw state in @new_crtc_state. @old_crtc_state is consumed: its memory
 * is wiped and reused as scratch space for the hw readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	/* alias: old_crtc_state doubles as the hw-readout buffer below */
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master_crtc;

	/*
	 * Tear down the old state in place and reset it to a blank state
	 * before the readout; only the uapi.state backpointer is preserved
	 * across the reset. The order of these calls matters.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* For bigjoiner configs the encoders sit on the master crtc. */
	master_crtc = intel_master_crtc(new_crtc_state);

	for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* The detailed compare below only makes sense for an active crtc. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
7002 
7003 static void
7004 intel_verify_planes(struct intel_atomic_state *state)
7005 {
7006 	struct intel_plane *plane;
7007 	const struct intel_plane_state *plane_state;
7008 	int i;
7009 
7010 	for_each_new_intel_plane_in_state(state, plane,
7011 					  plane_state, i)
7012 		assert_plane(plane, plane_state->planar_slave ||
7013 			     plane_state->uapi.visible);
7014 }
7015 
7016 static void
7017 verify_single_dpll_state(struct drm_i915_private *dev_priv,
7018 			 struct intel_shared_dpll *pll,
7019 			 struct intel_crtc *crtc,
7020 			 struct intel_crtc_state *new_crtc_state)
7021 {
7022 	struct intel_dpll_hw_state dpll_hw_state;
7023 	u8 pipe_mask;
7024 	bool active;
7025 
7026 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
7027 
7028 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
7029 
7030 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
7031 
7032 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
7033 		I915_STATE_WARN(!pll->on && pll->active_mask,
7034 		     "pll in active use but not on in sw tracking\n");
7035 		I915_STATE_WARN(pll->on && !pll->active_mask,
7036 		     "pll is on but not used by any active pipe\n");
7037 		I915_STATE_WARN(pll->on != active,
7038 		     "pll on state mismatch (expected %i, found %i)\n",
7039 		     pll->on, active);
7040 	}
7041 
7042 	if (!crtc) {
7043 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
7044 				"more active pll users than references: 0x%x vs 0x%x\n",
7045 				pll->active_mask, pll->state.pipe_mask);
7046 
7047 		return;
7048 	}
7049 
7050 	pipe_mask = BIT(crtc->pipe);
7051 
7052 	if (new_crtc_state->hw.active)
7053 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
7054 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
7055 				pipe_name(crtc->pipe), pll->active_mask);
7056 	else
7057 		I915_STATE_WARN(pll->active_mask & pipe_mask,
7058 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
7059 				pipe_name(crtc->pipe), pll->active_mask);
7060 
7061 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
7062 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
7063 			pipe_mask, pll->state.pipe_mask);
7064 
7065 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
7066 					  &dpll_hw_state,
7067 					  sizeof(dpll_hw_state)),
7068 			"pll hw state mismatch\n");
7069 }
7070 
7071 static void
7072 verify_shared_dpll_state(struct intel_crtc *crtc,
7073 			 struct intel_crtc_state *old_crtc_state,
7074 			 struct intel_crtc_state *new_crtc_state)
7075 {
7076 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7077 
7078 	if (new_crtc_state->shared_dpll)
7079 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
7080 
7081 	if (old_crtc_state->shared_dpll &&
7082 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
7083 		u8 pipe_mask = BIT(crtc->pipe);
7084 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
7085 
7086 		I915_STATE_WARN(pll->active_mask & pipe_mask,
7087 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
7088 				pipe_name(crtc->pipe), pll->active_mask);
7089 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
7090 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
7091 				pipe_name(crtc->pipe), pll->state.pipe_mask);
7092 	}
7093 }
7094 
/*
 * On DG2, read back the MPLLB register state via the encoder and compare
 * each field against the sw state computed in @new_crtc_state.
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
		   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_mpllb_state mpllb_hw_state = { 0 };
	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_encoder *encoder;

	/* MPLLB only exists on DG2. */
	if (!IS_DG2(i915))
		return;

	if (!new_crtc_state->hw.active)
		return;

	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* Complain (but don't fail anything) when a single MPLLB field mismatches. */
#define MPLLB_CHECK(name) do { \
	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     mpllb_sw_state->name, \
				     mpllb_hw_state.name); \
	} \
} while (0)

	MPLLB_CHECK(mpllb_cp);
	MPLLB_CHECK(mpllb_div);
	MPLLB_CHECK(mpllb_div2);
	MPLLB_CHECK(mpllb_fracn1);
	MPLLB_CHECK(mpllb_fracn2);
	MPLLB_CHECK(mpllb_sscen);
	MPLLB_CHECK(mpllb_sscstep);

	/*
	 * ref_control is handled by the hardware/firmware and never
	 * programmed by the software, but the proper values are supplied
	 * in the bspec for verification purposes.
	 */
	MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
7140 
7141 static void
7142 intel_modeset_verify_crtc(struct intel_crtc *crtc,
7143 			  struct intel_atomic_state *state,
7144 			  struct intel_crtc_state *old_crtc_state,
7145 			  struct intel_crtc_state *new_crtc_state)
7146 {
7147 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
7148 		return;
7149 
7150 	verify_wm_state(crtc, new_crtc_state);
7151 	verify_connector_state(state, crtc);
7152 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
7153 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
7154 	verify_mpllb_state(state, new_crtc_state);
7155 }
7156 
7157 static void
7158 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
7159 {
7160 	int i;
7161 
7162 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
7163 		verify_single_dpll_state(dev_priv,
7164 					 &dev_priv->dpll.shared_dplls[i],
7165 					 NULL, NULL);
7166 }
7167 
/*
 * Verify the parts of the hw/sw state that are not tied to any particular
 * crtc: encoders, crtc-less connectors, and idle DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	/* NULL crtc selects the connectors not bound to any crtc */
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
7176 
7177 int intel_modeset_all_pipes(struct intel_atomic_state *state)
7178 {
7179 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7180 	struct intel_crtc *crtc;
7181 
7182 	/*
7183 	 * Add all pipes to the state, and force
7184 	 * a modeset on all the active ones.
7185 	 */
7186 	for_each_intel_crtc(&dev_priv->drm, crtc) {
7187 		struct intel_crtc_state *crtc_state;
7188 		int ret;
7189 
7190 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7191 		if (IS_ERR(crtc_state))
7192 			return PTR_ERR(crtc_state);
7193 
7194 		if (!crtc_state->hw.active ||
7195 		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
7196 			continue;
7197 
7198 		crtc_state->uapi.mode_changed = true;
7199 
7200 		ret = drm_atomic_add_affected_connectors(&state->base,
7201 							 &crtc->base);
7202 		if (ret)
7203 			return ret;
7204 
7205 		ret = intel_atomic_add_affected_planes(state, crtc);
7206 		if (ret)
7207 			return ret;
7208 
7209 		crtc_state->update_planes |= crtc_state->active_planes;
7210 	}
7211 
7212 	return 0;
7213 }
7214 
/*
 * Update the crtc's vblank timestamping constants and scanline-counter
 * offset from the (possibly VRR-adjusted) active mode timings.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* local copy: the VRR adjustments below must not touch crtc_state */
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;

	if (crtc_state->vrr.enable) {
		/* With VRR the frame can stretch up to vmax lines. */
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (DISPLAY_VER(dev_priv) == 2) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
7276 
7277 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
7278 {
7279 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7280 	struct intel_crtc_state *new_crtc_state;
7281 	struct intel_crtc *crtc;
7282 	int i;
7283 
7284 	if (!dev_priv->dpll_funcs)
7285 		return;
7286 
7287 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7288 		if (!intel_crtc_needs_modeset(new_crtc_state))
7289 			continue;
7290 
7291 		intel_release_shared_dplls(state, crtc);
7292 	}
7293 }
7294 
7295 /*
7296  * This implements the workaround described in the "notes" section of the mode
7297  * set sequence documentation. When going from no pipes or single pipe to
7298  * multiple pipes, and planes are enabled after the pipe, we need to wait at
7299  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
7300  */
7301 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
7302 {
7303 	struct intel_crtc_state *crtc_state;
7304 	struct intel_crtc *crtc;
7305 	struct intel_crtc_state *first_crtc_state = NULL;
7306 	struct intel_crtc_state *other_crtc_state = NULL;
7307 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
7308 	int i;
7309 
7310 	/* look at all crtc's that are going to be enabled in during modeset */
7311 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7312 		if (!crtc_state->hw.active ||
7313 		    !intel_crtc_needs_modeset(crtc_state))
7314 			continue;
7315 
7316 		if (first_crtc_state) {
7317 			other_crtc_state = crtc_state;
7318 			break;
7319 		} else {
7320 			first_crtc_state = crtc_state;
7321 			first_pipe = crtc->pipe;
7322 		}
7323 	}
7324 
7325 	/* No workaround needed? */
7326 	if (!first_crtc_state)
7327 		return 0;
7328 
7329 	/* w/a possibly needed, check how many crtc's are already enabled. */
7330 	for_each_intel_crtc(state->base.dev, crtc) {
7331 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
7332 		if (IS_ERR(crtc_state))
7333 			return PTR_ERR(crtc_state);
7334 
7335 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
7336 
7337 		if (!crtc_state->hw.active ||
7338 		    intel_crtc_needs_modeset(crtc_state))
7339 			continue;
7340 
7341 		/* 2 or more enabled crtcs means no need for w/a */
7342 		if (enabled_pipe != INVALID_PIPE)
7343 			return 0;
7344 
7345 		enabled_pipe = crtc->pipe;
7346 	}
7347 
7348 	if (enabled_pipe != INVALID_PIPE)
7349 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
7350 	else if (other_crtc_state)
7351 		other_crtc_state->hsw_workaround_pipe = first_pipe;
7352 
7353 	return 0;
7354 }
7355 
7356 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
7357 			   u8 active_pipes)
7358 {
7359 	const struct intel_crtc_state *crtc_state;
7360 	struct intel_crtc *crtc;
7361 	int i;
7362 
7363 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7364 		if (crtc_state->hw.active)
7365 			active_pipes |= BIT(crtc->pipe);
7366 		else
7367 			active_pipes &= ~BIT(crtc->pipe);
7368 	}
7369 
7370 	return active_pipes;
7371 }
7372 
7373 static int intel_modeset_checks(struct intel_atomic_state *state)
7374 {
7375 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7376 
7377 	state->modeset = true;
7378 
7379 	if (IS_HASWELL(dev_priv))
7380 		return hsw_mode_set_planes_workaround(state);
7381 
7382 	return 0;
7383 }
7384 
7385 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
7386 				     struct intel_crtc_state *new_crtc_state)
7387 {
7388 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
7389 		return;
7390 
7391 	new_crtc_state->uapi.mode_changed = false;
7392 	new_crtc_state->update_pipe = true;
7393 }
7394 
7395 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
7396 				    struct intel_crtc_state *new_crtc_state)
7397 {
7398 	/*
7399 	 * If we're not doing the full modeset we want to
7400 	 * keep the current M/N values as they may be
7401 	 * sufficiently different to the computed values
7402 	 * to cause problems.
7403 	 *
7404 	 * FIXME: should really copy more fuzzy state here
7405 	 */
7406 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
7407 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
7408 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
7409 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
7410 }
7411 
7412 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
7413 					  struct intel_crtc *crtc,
7414 					  u8 plane_ids_mask)
7415 {
7416 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
7417 	struct intel_plane *plane;
7418 
7419 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
7420 		struct intel_plane_state *plane_state;
7421 
7422 		if ((plane_ids_mask & BIT(plane->id)) == 0)
7423 			continue;
7424 
7425 		plane_state = intel_atomic_get_plane_state(state, plane);
7426 		if (IS_ERR(plane_state))
7427 			return PTR_ERR(plane_state);
7428 	}
7429 
7430 	return 0;
7431 }
7432 
7433 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
7434 				     struct intel_crtc *crtc)
7435 {
7436 	const struct intel_crtc_state *old_crtc_state =
7437 		intel_atomic_get_old_crtc_state(state, crtc);
7438 	const struct intel_crtc_state *new_crtc_state =
7439 		intel_atomic_get_new_crtc_state(state, crtc);
7440 
7441 	return intel_crtc_add_planes_to_state(state, crtc,
7442 					      old_crtc_state->enabled_planes |
7443 					      new_crtc_state->enabled_planes);
7444 }
7445 
7446 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
7447 {
7448 	/* See {hsw,vlv,ivb}_plane_ratio() */
7449 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
7450 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7451 		IS_IVYBRIDGE(dev_priv);
7452 }
7453 
7454 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
7455 					   struct intel_crtc *crtc,
7456 					   struct intel_crtc *other)
7457 {
7458 	const struct intel_plane_state *plane_state;
7459 	struct intel_plane *plane;
7460 	u8 plane_ids = 0;
7461 	int i;
7462 
7463 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7464 		if (plane->pipe == crtc->pipe)
7465 			plane_ids |= BIT(plane->id);
7466 	}
7467 
7468 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
7469 }
7470 
7471 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
7472 {
7473 	const struct intel_crtc_state *crtc_state;
7474 	struct intel_crtc *crtc;
7475 	int i;
7476 
7477 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7478 		int ret;
7479 
7480 		if (!crtc_state->bigjoiner)
7481 			continue;
7482 
7483 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
7484 						      crtc_state->bigjoiner_linked_crtc);
7485 		if (ret)
7486 			return ret;
7487 	}
7488 
7489 	return 0;
7490 }
7491 
/*
 * Plane-level atomic checks: resolve icl linked/nv12 planes and
 * bigjoiner plane mirroring, run each plane's atomic check, then on
 * platforms where the active plane count matters for cdclk, pull any
 * crtc's planes into the state when that count changes.
 *
 * Returns 0 on success or a negative error code from any sub-check.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	/* Per-plane driver checks; failure aborts the whole commit. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane does not count towards the ratio. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
7548 
/*
 * Decide whether a full cdclk recomputation is needed for this commit
 * and report it via @need_cdclk_calc.  Considers per-plane minimum
 * cdclk, a changed force_min_cdclk, and the bandwidth state's minimum
 * cdclk exceeding the current per-pipe maximum.
 *
 * Returns 0 on success or a negative error code from the sub-checks.
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	const struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	/* A changed forced minimum always requires recomputation. */
	if (new_cdclk_state &&
	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
		*need_cdclk_calc = true;

	ret = intel_cdclk_bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Nothing more to compare without both cdclk and bw state. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		/* Track the highest per-pipe minimum seen so far. */
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
7601 
7602 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
7603 {
7604 	struct intel_crtc_state *crtc_state;
7605 	struct intel_crtc *crtc;
7606 	int i;
7607 
7608 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
7609 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7610 		int ret;
7611 
7612 		ret = intel_crtc_atomic_check(state, crtc);
7613 		if (ret) {
7614 			drm_dbg_atomic(&i915->drm,
7615 				       "[CRTC:%d:%s] atomic driver check failed\n",
7616 				       crtc->base.base.id, crtc->base.name);
7617 			return ret;
7618 		}
7619 	}
7620 
7621 	return 0;
7622 }
7623 
7624 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
7625 					       u8 transcoders)
7626 {
7627 	const struct intel_crtc_state *new_crtc_state;
7628 	struct intel_crtc *crtc;
7629 	int i;
7630 
7631 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7632 		if (new_crtc_state->hw.enable &&
7633 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
7634 		    intel_crtc_needs_modeset(new_crtc_state))
7635 			return true;
7636 	}
7637 
7638 	return false;
7639 }
7640 
/*
 * Validate and wire up the bigjoiner master/slave relationship for
 * @crtc: verify that a crtc previously used as a slave is still
 * claimed by its master, and that a crtc becoming a master can claim
 * the next crtc as its slave (it must exist and not be enabled as a
 * normal crtc).  On success the slave's state is derived from the
 * master's via copy_bigjoiner_crtc_state().
 *
 * Returns 0 on success, -EINVAL on a conflicting configuration, or a
 * negative error from the slave state lookup.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
	struct intel_crtc *slave_crtc, *master_crtc;

	/* slave being enabled: is the master still claiming this crtc? */
	if (old_crtc_state->bigjoiner_slave) {
		slave_crtc = crtc;
		master_crtc = old_crtc_state->bigjoiner_linked_crtc;
		master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
			goto claimed;
	}

	/* Not a bigjoiner master; nothing more to do. */
	if (!new_crtc_state->bigjoiner)
		return 0;

	slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
	if (!slave_crtc) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
			      "CRTC + 1 to be used, doesn't exist\n",
			      crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
	master_crtc = crtc;
	if (IS_ERR(slave_crtc_state))
		return PTR_ERR(slave_crtc_state);

	/* master being enabled, slave was already configured? */
	if (slave_crtc_state->uapi.enable)
		goto claimed;

	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
		      slave_crtc->base.base.id, slave_crtc->base.name);

	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);

claimed:
	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
		      slave_crtc->base.base.id, slave_crtc->base.name,
		      master_crtc->base.base.id, master_crtc->base.name);
	return -EINVAL;
}
7691 
7692 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
7693 				 struct intel_crtc_state *master_crtc_state)
7694 {
7695 	struct intel_crtc_state *slave_crtc_state =
7696 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
7697 
7698 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
7699 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
7700 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
7701 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
7702 }
7703 
7704 /**
7705  * DOC: asynchronous flip implementation
7706  *
7707  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
7708  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
7709  * Correspondingly, support is currently added for primary plane only.
7710  *
7711  * Async flip can only change the plane surface address, so anything else
7712  * changing is rejected from the intel_atomic_check_async() function.
7713  * Once this check is cleared, flip done interrupt is enabled using
7714  * the intel_crtc_enable_flip_done() function.
7715  *
7716  * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to the userspace in the interrupt
7718  * handler itself. The timestamp and sequence sent during the flip done event
7719  * correspond to the last vblank and have no relation to the actual time when
7720  * the flip done event was sent.
7721  */
/*
 * Validate that the commit on @crtc qualifies as an async flip: only
 * the plane surface address may change.  Rejects (-EINVAL) modesets,
 * inactive crtcs, active-plane changes, and any per-plane change to
 * tiling, format, stride, rotation, size/position, alpha/blend,
 * color encoding/range, or decryption.
 */
static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	const struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	int i;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
		return -EINVAL;
	}

	if (!new_crtc_state->hw.active) {
		drm_dbg_kms(&i915->drm, "CRTC inactive\n");
		return -EINVAL;
	}
	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
		drm_dbg_kms(&i915->drm,
			    "Active planes cannot be changed during async flip\n");
		return -EINVAL;
	}

	/* Check each plane on this crtc that is part of the state. */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip IOCTL
		 * as of now. So support currently added for primary plane only.
		 * Support for other planes on platforms on which supports
		 * this(vlv/chv and icl+) should be added when async flip is
		 * enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip)
			return -EINVAL;

		/*
		 * FIXME: This check is kept generic for all platforms.
		 * Need to verify this for all gen9 platforms to enable
		 * this selectively if required.
		 */
		switch (new_plane_state->hw.fb->modifier) {
		case I915_FORMAT_MOD_X_TILED:
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			break;
		default:
			drm_dbg_kms(&i915->drm,
				    "Linear memory/CCS does not support async flips\n");
			return -EINVAL;
		}

		if (new_plane_state->hw.fb->format->num_planes > 1) {
			drm_dbg_kms(&i915->drm,
				    "Planar formats not supported with async flips\n");
			return -EINVAL;
		}

		if (old_plane_state->view.color_plane[0].mapping_stride !=
		    new_plane_state->view.color_plane[0].mapping_stride) {
			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->modifier !=
		    new_plane_state->hw.fb->modifier) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer modifiers cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.fb->format !=
		    new_plane_state->hw.fb->format) {
			drm_dbg_kms(&i915->drm,
				    "Framebuffer format cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.rotation !=
		    new_plane_state->hw.rotation) {
			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
			drm_dbg_kms(&i915->drm,
				    "Plane size/co-ordinates cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.pixel_blend_mode !=
		    new_plane_state->hw.pixel_blend_mode) {
			drm_dbg_kms(&i915->drm,
				    "Pixel blend mode cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
			drm_dbg_kms(&i915->drm,
				    "Color encoding cannot be changed in async flip\n");
			return -EINVAL;
		}

		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
			return -EINVAL;
		}

		/* plane decryption is allowed to change only in synchronous flips */
		if (old_plane_state->decrypt != new_plane_state->decrypt)
			return -EINVAL;
	}

	return 0;
}
7848 
/*
 * Pull the linked crtc of every bigjoiner crtc into @state; when the
 * bigjoiner crtc needs a modeset, force one on the linked crtc too
 * and add its connectors and planes.  A second pass severs stale
 * master links so they can be re-established by the bigjoiner check.
 *
 * Returns 0 or a negative error from the state/connector/plane adds.
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *linked_crtc_state;
		struct intel_crtc *linked_crtc;
		int ret;

		if (!crtc_state->bigjoiner)
			continue;

		/* Always pull the linked crtc into the state... */
		linked_crtc = crtc_state->bigjoiner_linked_crtc;
		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
		if (IS_ERR(linked_crtc_state))
			return PTR_ERR(linked_crtc_state);

		/* ...but only propagate the modeset when one is needed. */
		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		linked_crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base,
							 &linked_crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, linked_crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
			kill_bigjoiner_slave(state, crtc_state);
	}

	return 0;
}
7892 
7893 /**
7894  * intel_atomic_check - validate state object
7895  * @dev: drm device
7896  * @_state: state to validate
7897  */
7898 static int intel_atomic_check(struct drm_device *dev,
7899 			      struct drm_atomic_state *_state)
7900 {
7901 	struct drm_i915_private *dev_priv = to_i915(dev);
7902 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
7903 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
7904 	struct intel_crtc *crtc;
7905 	int ret, i;
7906 	bool any_ms = false;
7907 
7908 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7909 					    new_crtc_state, i) {
7910 		if (new_crtc_state->inherited != old_crtc_state->inherited)
7911 			new_crtc_state->uapi.mode_changed = true;
7912 	}
7913 
7914 	intel_vrr_check_modeset(state);
7915 
7916 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
7917 	if (ret)
7918 		goto fail;
7919 
7920 	ret = intel_bigjoiner_add_affected_crtcs(state);
7921 	if (ret)
7922 		goto fail;
7923 
7924 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7925 					    new_crtc_state, i) {
7926 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
7927 			/* Light copy */
7928 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
7929 
7930 			continue;
7931 		}
7932 
7933 		if (!new_crtc_state->uapi.enable) {
7934 			if (!new_crtc_state->bigjoiner_slave) {
7935 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
7936 				any_ms = true;
7937 			}
7938 			continue;
7939 		}
7940 
7941 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
7942 		if (ret)
7943 			goto fail;
7944 
7945 		ret = intel_modeset_pipe_config(state, new_crtc_state);
7946 		if (ret)
7947 			goto fail;
7948 
7949 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
7950 						   new_crtc_state);
7951 		if (ret)
7952 			goto fail;
7953 	}
7954 
7955 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
7956 					    new_crtc_state, i) {
7957 		if (!intel_crtc_needs_modeset(new_crtc_state))
7958 			continue;
7959 
7960 		ret = intel_modeset_pipe_config_late(new_crtc_state);
7961 		if (ret)
7962 			goto fail;
7963 
7964 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
7965 	}
7966 
7967 	/**
7968 	 * Check if fastset is allowed by external dependencies like other
7969 	 * pipes and transcoders.
7970 	 *
7971 	 * Right now it only forces a fullmodeset when the MST master
7972 	 * transcoder did not changed but the pipe of the master transcoder
7973 	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
7974 	 * in case of port synced crtcs, if one of the synced crtcs
7975 	 * needs a full modeset, all other synced crtcs should be
7976 	 * forced a full modeset.
7977 	 */
7978 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
7979 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
7980 			continue;
7981 
7982 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
7983 			enum transcoder master = new_crtc_state->mst_master_transcoder;
7984 
7985 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
7986 				new_crtc_state->uapi.mode_changed = true;
7987 				new_crtc_state->update_pipe = false;
7988 			}
7989 		}
7990 
7991 		if (is_trans_port_sync_mode(new_crtc_state)) {
7992 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
7993 
7994 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
7995 				trans |= BIT(new_crtc_state->master_transcoder);
7996 
7997 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
7998 				new_crtc_state->uapi.mode_changed = true;
7999 				new_crtc_state->update_pipe = false;
8000 			}
8001 		}
8002 
8003 		if (new_crtc_state->bigjoiner) {
8004 			struct intel_crtc_state *linked_crtc_state =
8005 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
8006 
8007 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
8008 				new_crtc_state->uapi.mode_changed = true;
8009 				new_crtc_state->update_pipe = false;
8010 			}
8011 		}
8012 	}
8013 
8014 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8015 					    new_crtc_state, i) {
8016 		if (intel_crtc_needs_modeset(new_crtc_state)) {
8017 			any_ms = true;
8018 			continue;
8019 		}
8020 
8021 		if (!new_crtc_state->update_pipe)
8022 			continue;
8023 
8024 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
8025 	}
8026 
8027 	if (any_ms && !check_digital_port_conflicts(state)) {
8028 		drm_dbg_kms(&dev_priv->drm,
8029 			    "rejecting conflicting digital port configuration\n");
8030 		ret = -EINVAL;
8031 		goto fail;
8032 	}
8033 
8034 	ret = drm_dp_mst_atomic_check(&state->base);
8035 	if (ret)
8036 		goto fail;
8037 
8038 	ret = intel_atomic_check_planes(state);
8039 	if (ret)
8040 		goto fail;
8041 
8042 	intel_fbc_choose_crtc(dev_priv, state);
8043 	ret = intel_compute_global_watermarks(state);
8044 	if (ret)
8045 		goto fail;
8046 
8047 	ret = intel_bw_atomic_check(state);
8048 	if (ret)
8049 		goto fail;
8050 
8051 	ret = intel_atomic_check_cdclk(state, &any_ms);
8052 	if (ret)
8053 		goto fail;
8054 
8055 	if (intel_any_crtc_needs_modeset(state))
8056 		any_ms = true;
8057 
8058 	if (any_ms) {
8059 		ret = intel_modeset_checks(state);
8060 		if (ret)
8061 			goto fail;
8062 
8063 		ret = intel_modeset_calc_cdclk(state);
8064 		if (ret)
8065 			return ret;
8066 
8067 		intel_modeset_clear_plls(state);
8068 	}
8069 
8070 	ret = intel_atomic_check_crtcs(state);
8071 	if (ret)
8072 		goto fail;
8073 
8074 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8075 					    new_crtc_state, i) {
8076 		if (new_crtc_state->uapi.async_flip) {
8077 			ret = intel_atomic_check_async(state, crtc);
8078 			if (ret)
8079 				goto fail;
8080 		}
8081 
8082 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
8083 		    !new_crtc_state->update_pipe)
8084 			continue;
8085 
8086 		intel_dump_pipe_config(new_crtc_state, state,
8087 				       intel_crtc_needs_modeset(new_crtc_state) ?
8088 				       "[modeset]" : "[fastset]");
8089 	}
8090 
8091 	return 0;
8092 
8093  fail:
8094 	if (ret == -EDEADLK)
8095 		return ret;
8096 
8097 	/*
8098 	 * FIXME would probably be nice to know which crtc specifically
8099 	 * caused the failure, in cases where we can pinpoint it.
8100 	 */
8101 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
8102 					    new_crtc_state, i)
8103 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
8104 
8105 	return ret;
8106 }
8107 
8108 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
8109 {
8110 	struct intel_crtc_state *crtc_state;
8111 	struct intel_crtc *crtc;
8112 	int i, ret;
8113 
8114 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
8115 	if (ret < 0)
8116 		return ret;
8117 
8118 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8119 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
8120 
8121 		if (mode_changed || crtc_state->update_pipe ||
8122 		    crtc_state->uapi.color_mgmt_changed) {
8123 			intel_dsb_prepare(crtc_state);
8124 		}
8125 	}
8126 
8127 	return 0;
8128 }
8129 
8130 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
8131 				  struct intel_crtc_state *crtc_state)
8132 {
8133 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8134 
8135 	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
8136 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
8137 
8138 	if (crtc_state->has_pch_encoder) {
8139 		enum pipe pch_transcoder =
8140 			intel_crtc_pch_transcoder(crtc);
8141 
8142 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
8143 	}
8144 }
8145 
/*
 * Program the pipe-level state that a fastset is allowed to change:
 * pipe source size, panel fitter, linetime watermark and (icl+) pipe
 * chicken bits.  Runs without a full modeset.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Pre-skl: explicitly enable or disable the PCH fitter. */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);
}
8188 
/*
 * Pipe-level programming that must happen before the planes are
 * armed: color management, pipemisc, the fastset pipe updates, PSR2
 * manual tracking, and the watermark update.
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}
8219 
8220 static void commit_pipe_post_planes(struct intel_atomic_state *state,
8221 				    struct intel_crtc *crtc)
8222 {
8223 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8224 	const struct intel_crtc_state *new_crtc_state =
8225 		intel_atomic_get_new_crtc_state(state, crtc);
8226 
8227 	/*
8228 	 * Disable the scaler(s) after the plane(s) so that we don't
8229 	 * get a catastrophic underrun even if the two operations
8230 	 * end up happening in two different frames.
8231 	 */
8232 	if (DISPLAY_VER(dev_priv) >= 9 &&
8233 	    !intel_crtc_needs_modeset(new_crtc_state))
8234 		skl_detach_scalers(new_crtc_state);
8235 }
8236 
8237 static void intel_enable_crtc(struct intel_atomic_state *state,
8238 			      struct intel_crtc *crtc)
8239 {
8240 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8241 	const struct intel_crtc_state *new_crtc_state =
8242 		intel_atomic_get_new_crtc_state(state, crtc);
8243 
8244 	if (!intel_crtc_needs_modeset(new_crtc_state))
8245 		return;
8246 
8247 	intel_crtc_update_active_timings(new_crtc_state);
8248 
8249 	dev_priv->display->crtc_enable(state, crtc);
8250 
8251 	if (new_crtc_state->bigjoiner_slave)
8252 		return;
8253 
8254 	/* vblanks work again, re-enable pipe CRC. */
8255 	intel_crtc_enable_pipe_crc(crtc);
8256 }
8257 
/*
 * Commit the plane and pipe updates for @crtc, performing the arming
 * writes inside the vblank-evasion critical section delimited by
 * intel_pipe_update_start()/end().  Statement order here is part of
 * the hardware programming sequence — do not reorder.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* Fastset-only preparation; modesets did this during enable. */
	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);
	}

	intel_fbc_update(state, crtc);

	intel_update_planes_on_crtc(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_arm_planes_on_crtc(state, crtc);
	else
		i9xx_arm_planes_on_crtc(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
8308 
/*
 * Fully disable one CRTC as part of a modeset: pipe CRC first (to avoid
 * racing vblank off), then the pipe itself, FBC and its shared DPLL.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv))
		intel_initial_watermarks(state, crtc);
}
8332 
/*
 * Disable all CRTCs that need a full modeset, in dependency order:
 * planes first on every such CRTC, then slave CRTCs (port sync / MST /
 * bigjoiner), and finally the remaining (master) CRTCs. The 'handled'
 * pipe mask prevents disabling a CRTC twice.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;
	int i;

	/* Pass 1: turn off the planes on every modesetted, active CRTC. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_pre_plane_update(state, crtc);
		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !old_crtc_state->bigjoiner_slave)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}
8390 
8391 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
8392 {
8393 	struct intel_crtc_state *new_crtc_state;
8394 	struct intel_crtc *crtc;
8395 	int i;
8396 
8397 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8398 		if (!new_crtc_state->hw.active)
8399 			continue;
8400 
8401 		intel_enable_crtc(state, crtc);
8402 		intel_update_crtc(state, crtc);
8403 	}
8404 }
8405 
/*
 * skl+ enable path. Pipes must be updated in an order that guarantees
 * their DDB (data buffer) allocations never transiently overlap, since
 * overlapping allocations cause FIFO underruns. 'entries' tracks the
 * allocation each pipe currently occupies in hardware; a pipe is only
 * updated once its new allocation no longer overlaps any of them.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Seed 'entries' with the current allocations of fastset pipes. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* Not safe yet: some other pipe still occupies the space. */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_wait_for_vblank(dev_priv, pipe);
		}
	}

	/* Modeset pipes get their plane updates in the final loop below. */
	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		/* Dependent pipes (MST slave, sync master, joiner master) wait. */
		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		/* Freshly enabled pipes must never overlap the occupied DDB. */
		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* Every pipe must have been handled by one of the loops above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
8527 
8528 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
8529 {
8530 	struct intel_atomic_state *state, *next;
8531 	struct llist_node *freed;
8532 
8533 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
8534 	llist_for_each_entry_safe(state, next, freed, freed)
8535 		drm_atomic_state_put(&state->base);
8536 }
8537 
8538 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
8539 {
8540 	struct drm_i915_private *dev_priv =
8541 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
8542 
8543 	intel_atomic_helper_free_state(dev_priv);
8544 }
8545 
/*
 * Wait for the commit's sw fence to signal, while also waking up early
 * if a GPU reset that needs the modeset locks (I915_RESET_MODESET) is
 * flagged — otherwise the commit could deadlock against the reset path.
 * Hand-rolled double waitqueue wait since we must wait on two events.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue on both waitqueues before checking either condition. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
8572 
/*
 * Release the DSB (display state buffer) of every CRTC in the commit.
 * Called from the cleanup worker; by then commit_tail has moved each
 * new state's dsb pointer into the old state (see intel_atomic_commit_tail).
 */
static void intel_cleanup_dsbs(struct intel_atomic_state *state)
{
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dsb_cleanup(old_crtc_state);
}
8583 
/*
 * Deferred cleanup after a commit has fully completed: DSBs, plane
 * framebuffers, commit bookkeeping, and finally the state reference
 * taken for this worker. Runs on system_highpri_wq (see commit_tail).
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
	struct intel_atomic_state *state =
		container_of(work, struct intel_atomic_state, base.commit_work);
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	intel_cleanup_dsbs(state);
	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
	drm_atomic_helper_commit_cleanup_done(&state->base);
	drm_atomic_state_put(&state->base);

	/* Also drain any states queued for deferred freeing. */
	intel_atomic_helper_free_state(i915);
}
8597 
/*
 * Read the fast-clear color value out of each plane's clear-color (CC)
 * plane into plane_state->ccval, for planes whose fb has one.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		/* Negative means this fb has no clear-color plane. */
		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at offset 0 of plane#2):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4x4 per-channel values; read the native value. */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
8638 
/*
 * The hardware-facing half of an atomic commit: waits for dependencies,
 * then performs disables, enables and plane updates in the required
 * order, and finally hands the old state to an async cleanup worker.
 * Runs either inline (blocking commits) or from a workqueue (nonblocking).
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Wait for fences, aborting early on a pending GPU reset. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	/*
	 * Grab the power domains each modified pipe will need; released
	 * via put_domains[] after the update below.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);

	/* Enable flip-done interrupts for async flips before the update. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display->commit_modeset_enables(state);

	intel_encoders_update_complete(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->uapi.async_flip)
			intel_crtc_disable_flip_done(state, crtc);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);
	intel_psr_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
8809 
8810 static void intel_atomic_commit_work(struct work_struct *work)
8811 {
8812 	struct intel_atomic_state *state =
8813 		container_of(work, struct intel_atomic_state, base.commit_work);
8814 
8815 	intel_atomic_commit_tail(state);
8816 }
8817 
8818 static int
8819 intel_atomic_commit_ready(struct i915_sw_fence *fence,
8820 			  enum i915_sw_fence_notify notify)
8821 {
8822 	struct intel_atomic_state *state =
8823 		container_of(fence, struct intel_atomic_state, commit_ready);
8824 
8825 	switch (notify) {
8826 	case FENCE_COMPLETE:
8827 		/* we do blocking waits in the worker, nothing to do here */
8828 		break;
8829 	case FENCE_FREE:
8830 		{
8831 			struct intel_atomic_helper *helper =
8832 				&to_i915(state->base.dev)->atomic_helper;
8833 
8834 			if (llist_add(&state->freed, &helper->free_list))
8835 				schedule_work(&helper->free_work);
8836 			break;
8837 		}
8838 	}
8839 
8840 	return NOTIFY_DONE;
8841 }
8842 
8843 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
8844 {
8845 	struct intel_plane_state *old_plane_state, *new_plane_state;
8846 	struct intel_plane *plane;
8847 	int i;
8848 
8849 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
8850 					     new_plane_state, i)
8851 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
8852 					to_intel_frontbuffer(new_plane_state->hw.fb),
8853 					plane->frontbuffer_bit);
8854 }
8855 
/*
 * Driver .atomic_commit hook: prepare the commit, swap in the new state
 * and either run the commit tail inline (blocking) or queue it on a
 * workqueue (nonblocking). Returns 0 or a negative errno; on failure
 * all acquired resources (fence, wakeref, planes, DSBs) are released.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/* Reference held by the commit_ready fence until FENCE_FREE. */
	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Commit the fence so its FENCE_FREE path releases the state. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		/* States were not swapped, so the DSBs still sit in the new states. */
		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Reference for the commit tail / cleanup worker. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking modesets must not overtake queued nonblocking ones. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
8946 
8947 /**
8948  * intel_plane_destroy - destroy a plane
8949  * @plane: plane to destroy
8950  *
8951  * Common destruction function for all types of planes (primary, cursor,
8952  * sprite).
8953  */
8954 void intel_plane_destroy(struct drm_plane *plane)
8955 {
8956 	drm_plane_cleanup(plane);
8957 	kfree(to_intel_plane(plane));
8958 }
8959 
8960 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
8961 {
8962 	struct intel_plane *plane;
8963 
8964 	for_each_intel_plane(&dev_priv->drm, plane) {
8965 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
8966 								  plane->pipe);
8967 
8968 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
8969 	}
8970 }
8971 
8972 
8973 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
8974 				      struct drm_file *file)
8975 {
8976 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8977 	struct drm_crtc *drmmode_crtc;
8978 	struct intel_crtc *crtc;
8979 
8980 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
8981 	if (!drmmode_crtc)
8982 		return -ENOENT;
8983 
8984 	crtc = to_intel_crtc(drmmode_crtc);
8985 	pipe_from_crtc_id->pipe = crtc->pipe;
8986 
8987 	return 0;
8988 }
8989 
8990 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
8991 {
8992 	struct drm_device *dev = encoder->base.dev;
8993 	struct intel_encoder *source_encoder;
8994 	u32 possible_clones = 0;
8995 
8996 	for_each_intel_encoder(dev, source_encoder) {
8997 		if (encoders_cloneable(encoder, source_encoder))
8998 			possible_clones |= drm_encoder_mask(&source_encoder->base);
8999 	}
9000 
9001 	return possible_clones;
9002 }
9003 
9004 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
9005 {
9006 	struct drm_device *dev = encoder->base.dev;
9007 	struct intel_crtc *crtc;
9008 	u32 possible_crtcs = 0;
9009 
9010 	for_each_intel_crtc(dev, crtc) {
9011 		if (encoder->pipe_mask & BIT(crtc->pipe))
9012 			possible_crtcs |= drm_crtc_mask(&crtc->base);
9013 	}
9014 
9015 	return possible_crtcs;
9016 }
9017 
9018 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
9019 {
9020 	if (!IS_MOBILE(dev_priv))
9021 		return false;
9022 
9023 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
9024 		return false;
9025 
9026 	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
9027 		return false;
9028 
9029 	return true;
9030 }
9031 
9032 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
9033 {
9034 	if (DISPLAY_VER(dev_priv) >= 9)
9035 		return false;
9036 
9037 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
9038 		return false;
9039 
9040 	if (HAS_PCH_LPT_H(dev_priv) &&
9041 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
9042 		return false;
9043 
9044 	/* DDI E can't be used if DDI A requires 4 lanes */
9045 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
9046 		return false;
9047 
9048 	if (!dev_priv->vbt.int_crt_support)
9049 		return false;
9050 
9051 	return true;
9052 }
9053 
9054 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
9055 {
9056 	struct intel_encoder *encoder;
9057 	bool dpd_is_edp = false;
9058 
9059 	intel_pps_unlock_regs_wa(dev_priv);
9060 
9061 	if (!HAS_DISPLAY(dev_priv))
9062 		return;
9063 
9064 	if (IS_DG2(dev_priv)) {
9065 		intel_ddi_init(dev_priv, PORT_A);
9066 		intel_ddi_init(dev_priv, PORT_B);
9067 		intel_ddi_init(dev_priv, PORT_C);
9068 		intel_ddi_init(dev_priv, PORT_D_XELPD);
9069 	} else if (IS_ALDERLAKE_P(dev_priv)) {
9070 		intel_ddi_init(dev_priv, PORT_A);
9071 		intel_ddi_init(dev_priv, PORT_B);
9072 		intel_ddi_init(dev_priv, PORT_TC1);
9073 		intel_ddi_init(dev_priv, PORT_TC2);
9074 		intel_ddi_init(dev_priv, PORT_TC3);
9075 		intel_ddi_init(dev_priv, PORT_TC4);
9076 		icl_dsi_init(dev_priv);
9077 	} else if (IS_ALDERLAKE_S(dev_priv)) {
9078 		intel_ddi_init(dev_priv, PORT_A);
9079 		intel_ddi_init(dev_priv, PORT_TC1);
9080 		intel_ddi_init(dev_priv, PORT_TC2);
9081 		intel_ddi_init(dev_priv, PORT_TC3);
9082 		intel_ddi_init(dev_priv, PORT_TC4);
9083 	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
9084 		intel_ddi_init(dev_priv, PORT_A);
9085 		intel_ddi_init(dev_priv, PORT_B);
9086 		intel_ddi_init(dev_priv, PORT_TC1);
9087 		intel_ddi_init(dev_priv, PORT_TC2);
9088 	} else if (DISPLAY_VER(dev_priv) >= 12) {
9089 		intel_ddi_init(dev_priv, PORT_A);
9090 		intel_ddi_init(dev_priv, PORT_B);
9091 		intel_ddi_init(dev_priv, PORT_TC1);
9092 		intel_ddi_init(dev_priv, PORT_TC2);
9093 		intel_ddi_init(dev_priv, PORT_TC3);
9094 		intel_ddi_init(dev_priv, PORT_TC4);
9095 		intel_ddi_init(dev_priv, PORT_TC5);
9096 		intel_ddi_init(dev_priv, PORT_TC6);
9097 		icl_dsi_init(dev_priv);
9098 	} else if (IS_JSL_EHL(dev_priv)) {
9099 		intel_ddi_init(dev_priv, PORT_A);
9100 		intel_ddi_init(dev_priv, PORT_B);
9101 		intel_ddi_init(dev_priv, PORT_C);
9102 		intel_ddi_init(dev_priv, PORT_D);
9103 		icl_dsi_init(dev_priv);
9104 	} else if (DISPLAY_VER(dev_priv) == 11) {
9105 		intel_ddi_init(dev_priv, PORT_A);
9106 		intel_ddi_init(dev_priv, PORT_B);
9107 		intel_ddi_init(dev_priv, PORT_C);
9108 		intel_ddi_init(dev_priv, PORT_D);
9109 		intel_ddi_init(dev_priv, PORT_E);
9110 		intel_ddi_init(dev_priv, PORT_F);
9111 		icl_dsi_init(dev_priv);
9112 	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
9113 		intel_ddi_init(dev_priv, PORT_A);
9114 		intel_ddi_init(dev_priv, PORT_B);
9115 		intel_ddi_init(dev_priv, PORT_C);
9116 		vlv_dsi_init(dev_priv);
9117 	} else if (DISPLAY_VER(dev_priv) >= 9) {
9118 		intel_ddi_init(dev_priv, PORT_A);
9119 		intel_ddi_init(dev_priv, PORT_B);
9120 		intel_ddi_init(dev_priv, PORT_C);
9121 		intel_ddi_init(dev_priv, PORT_D);
9122 		intel_ddi_init(dev_priv, PORT_E);
9123 	} else if (HAS_DDI(dev_priv)) {
9124 		u32 found;
9125 
9126 		if (intel_ddi_crt_present(dev_priv))
9127 			intel_crt_init(dev_priv);
9128 
9129 		/* Haswell uses DDI functions to detect digital outputs. */
9130 		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
9131 		if (found)
9132 			intel_ddi_init(dev_priv, PORT_A);
9133 
9134 		found = intel_de_read(dev_priv, SFUSE_STRAP);
9135 		if (found & SFUSE_STRAP_DDIB_DETECTED)
9136 			intel_ddi_init(dev_priv, PORT_B);
9137 		if (found & SFUSE_STRAP_DDIC_DETECTED)
9138 			intel_ddi_init(dev_priv, PORT_C);
9139 		if (found & SFUSE_STRAP_DDID_DETECTED)
9140 			intel_ddi_init(dev_priv, PORT_D);
9141 		if (found & SFUSE_STRAP_DDIF_DETECTED)
9142 			intel_ddi_init(dev_priv, PORT_F);
9143 	} else if (HAS_PCH_SPLIT(dev_priv)) {
9144 		int found;
9145 
9146 		/*
9147 		 * intel_edp_init_connector() depends on this completing first,
9148 		 * to prevent the registration of both eDP and LVDS and the
9149 		 * incorrect sharing of the PPS.
9150 		 */
9151 		intel_lvds_init(dev_priv);
9152 		intel_crt_init(dev_priv);
9153 
9154 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
9155 
9156 		if (ilk_has_edp_a(dev_priv))
9157 			g4x_dp_init(dev_priv, DP_A, PORT_A);
9158 
9159 		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
9160 			/* PCH SDVOB multiplex with HDMIB */
9161 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
9162 			if (!found)
9163 				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
9164 			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
9165 				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
9166 		}
9167 
9168 		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
9169 			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
9170 
9171 		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
9172 			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
9173 
9174 		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
9175 			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
9176 
9177 		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
9178 			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
9179 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
9180 		bool has_edp, has_port;
9181 
9182 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
9183 			intel_crt_init(dev_priv);
9184 
9185 		/*
9186 		 * The DP_DETECTED bit is the latched state of the DDC
9187 		 * SDA pin at boot. However since eDP doesn't require DDC
9188 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
9189 		 * eDP ports may have been muxed to an alternate function.
9190 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
9191 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
9192 		 * detect eDP ports.
9193 		 *
9194 		 * Sadly the straps seem to be missing sometimes even for HDMI
9195 		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
9196 		 * and VBT for the presence of the port. Additionally we can't
9197 		 * trust the port type the VBT declares as we've seen at least
9198 		 * HDMI ports that the VBT claim are DP or eDP.
9199 		 */
9200 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
9201 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
9202 		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
9203 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
9204 		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
9205 			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
9206 
9207 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
9208 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
9209 		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
9210 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
9211 		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
9212 			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
9213 
9214 		if (IS_CHERRYVIEW(dev_priv)) {
9215 			/*
9216 			 * eDP not supported on port D,
9217 			 * so no need to worry about it
9218 			 */
9219 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
9220 			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
9221 				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
9222 			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
9223 				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
9224 		}
9225 
9226 		vlv_dsi_init(dev_priv);
9227 	} else if (IS_PINEVIEW(dev_priv)) {
9228 		intel_lvds_init(dev_priv);
9229 		intel_crt_init(dev_priv);
9230 	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
9231 		bool found = false;
9232 
9233 		if (IS_MOBILE(dev_priv))
9234 			intel_lvds_init(dev_priv);
9235 
9236 		intel_crt_init(dev_priv);
9237 
9238 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9239 			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
9240 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
9241 			if (!found && IS_G4X(dev_priv)) {
9242 				drm_dbg_kms(&dev_priv->drm,
9243 					    "probing HDMI on SDVOB\n");
9244 				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
9245 			}
9246 
9247 			if (!found && IS_G4X(dev_priv))
9248 				g4x_dp_init(dev_priv, DP_B, PORT_B);
9249 		}
9250 
9251 		/* Before G4X SDVOC doesn't have its own detect register */
9252 
9253 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
9254 			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
9255 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
9256 		}
9257 
9258 		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
9259 
9260 			if (IS_G4X(dev_priv)) {
9261 				drm_dbg_kms(&dev_priv->drm,
9262 					    "probing HDMI on SDVOC\n");
9263 				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
9264 			}
9265 			if (IS_G4X(dev_priv))
9266 				g4x_dp_init(dev_priv, DP_C, PORT_C);
9267 		}
9268 
9269 		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
9270 			g4x_dp_init(dev_priv, DP_D, PORT_D);
9271 
9272 		if (SUPPORTS_TV(dev_priv))
9273 			intel_tv_init(dev_priv);
9274 	} else if (DISPLAY_VER(dev_priv) == 2) {
9275 		if (IS_I85X(dev_priv))
9276 			intel_lvds_init(dev_priv);
9277 
9278 		intel_crt_init(dev_priv);
9279 		intel_dvo_init(dev_priv);
9280 	}
9281 
9282 	for_each_intel_encoder(&dev_priv->drm, encoder) {
9283 		encoder->base.possible_crtcs =
9284 			intel_encoder_possible_crtcs(encoder);
9285 		encoder->base.possible_clones =
9286 			intel_encoder_possible_clones(encoder);
9287 	}
9288 
9289 	intel_init_pch_refclk(dev_priv);
9290 
9291 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
9292 }
9293 
9294 static enum drm_mode_status
9295 intel_mode_valid(struct drm_device *dev,
9296 		 const struct drm_display_mode *mode)
9297 {
9298 	struct drm_i915_private *dev_priv = to_i915(dev);
9299 	int hdisplay_max, htotal_max;
9300 	int vdisplay_max, vtotal_max;
9301 
9302 	/*
9303 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
9304 	 * of DBLSCAN modes to the output's mode list when they detect
9305 	 * the scaling mode property on the connector. And they don't
9306 	 * ask the kernel to validate those modes in any way until
9307 	 * modeset time at which point the client gets a protocol error.
9308 	 * So in order to not upset those clients we silently ignore the
9309 	 * DBLSCAN flag on such connectors. For other connectors we will
9310 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
9311 	 * And we always reject DBLSCAN modes in connector->mode_valid()
9312 	 * as we never want such modes on the connector's mode list.
9313 	 */
9314 
9315 	if (mode->vscan > 1)
9316 		return MODE_NO_VSCAN;
9317 
9318 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
9319 		return MODE_H_ILLEGAL;
9320 
9321 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
9322 			   DRM_MODE_FLAG_NCSYNC |
9323 			   DRM_MODE_FLAG_PCSYNC))
9324 		return MODE_HSYNC;
9325 
9326 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
9327 			   DRM_MODE_FLAG_PIXMUX |
9328 			   DRM_MODE_FLAG_CLKDIV2))
9329 		return MODE_BAD;
9330 
9331 	/* Transcoder timing limits */
9332 	if (DISPLAY_VER(dev_priv) >= 11) {
9333 		hdisplay_max = 16384;
9334 		vdisplay_max = 8192;
9335 		htotal_max = 16384;
9336 		vtotal_max = 8192;
9337 	} else if (DISPLAY_VER(dev_priv) >= 9 ||
9338 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
9339 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
9340 		vdisplay_max = 4096;
9341 		htotal_max = 8192;
9342 		vtotal_max = 8192;
9343 	} else if (DISPLAY_VER(dev_priv) >= 3) {
9344 		hdisplay_max = 4096;
9345 		vdisplay_max = 4096;
9346 		htotal_max = 8192;
9347 		vtotal_max = 8192;
9348 	} else {
9349 		hdisplay_max = 2048;
9350 		vdisplay_max = 2048;
9351 		htotal_max = 4096;
9352 		vtotal_max = 4096;
9353 	}
9354 
9355 	if (mode->hdisplay > hdisplay_max ||
9356 	    mode->hsync_start > htotal_max ||
9357 	    mode->hsync_end > htotal_max ||
9358 	    mode->htotal > htotal_max)
9359 		return MODE_H_ILLEGAL;
9360 
9361 	if (mode->vdisplay > vdisplay_max ||
9362 	    mode->vsync_start > vtotal_max ||
9363 	    mode->vsync_end > vtotal_max ||
9364 	    mode->vtotal > vtotal_max)
9365 		return MODE_V_ILLEGAL;
9366 
9367 	if (DISPLAY_VER(dev_priv) >= 5) {
9368 		if (mode->hdisplay < 64 ||
9369 		    mode->htotal - mode->hdisplay < 32)
9370 			return MODE_H_ILLEGAL;
9371 
9372 		if (mode->vtotal - mode->vdisplay < 5)
9373 			return MODE_V_ILLEGAL;
9374 	} else {
9375 		if (mode->htotal - mode->hdisplay < 32)
9376 			return MODE_H_ILLEGAL;
9377 
9378 		if (mode->vtotal - mode->vdisplay < 3)
9379 			return MODE_V_ILLEGAL;
9380 	}
9381 
9382 	/*
9383 	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
9384 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
9385 	 */
9386 	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
9387 	    mode->hsync_start == mode->hdisplay)
9388 		return MODE_H_ILLEGAL;
9389 
9390 	return MODE_OK;
9391 }
9392 
9393 enum drm_mode_status
9394 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
9395 				const struct drm_display_mode *mode,
9396 				bool bigjoiner)
9397 {
9398 	int plane_width_max, plane_height_max;
9399 
9400 	/*
9401 	 * intel_mode_valid() should be
9402 	 * sufficient on older platforms.
9403 	 */
9404 	if (DISPLAY_VER(dev_priv) < 9)
9405 		return MODE_OK;
9406 
9407 	/*
9408 	 * Most people will probably want a fullscreen
9409 	 * plane so let's not advertize modes that are
9410 	 * too big for that.
9411 	 */
9412 	if (DISPLAY_VER(dev_priv) >= 11) {
9413 		plane_width_max = 5120 << bigjoiner;
9414 		plane_height_max = 4320;
9415 	} else {
9416 		plane_width_max = 5120;
9417 		plane_height_max = 4096;
9418 	}
9419 
9420 	if (mode->hdisplay > plane_width_max)
9421 		return MODE_H_ILLEGAL;
9422 
9423 	if (mode->vdisplay > plane_height_max)
9424 		return MODE_V_ILLEGAL;
9425 
9426 	return MODE_OK;
9427 }
9428 
/*
 * drm_mode_config_funcs for i915: framebuffer creation/format lookup,
 * fbdev poll notification, mode validation and the atomic check/commit
 * plus custom atomic state alloc/clear/free hooks.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
9440 
/* Selected for DISPLAY_VER >= 9 (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};
9448 
/* Selected for HAS_DDI platforms below display ver 9 (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9456 
/* Selected for HAS_PCH_SPLIT platforms without DDI (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9464 
/* Selected for Valleyview/Cherryview (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9472 
/* Fallback table for the remaining legacy platforms (see intel_init_display_hooks()). */
static const struct drm_i915_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
9480 
9481 /**
9482  * intel_init_display_hooks - initialize the display modesetting hooks
9483  * @dev_priv: device private
9484  */
9485 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
9486 {
9487 	if (!HAS_DISPLAY(dev_priv))
9488 		return;
9489 
9490 	intel_init_cdclk_hooks(dev_priv);
9491 	intel_audio_hooks_init(dev_priv);
9492 
9493 	intel_dpll_init_clock_hook(dev_priv);
9494 
9495 	if (DISPLAY_VER(dev_priv) >= 9) {
9496 		dev_priv->display = &skl_display_funcs;
9497 	} else if (HAS_DDI(dev_priv)) {
9498 		dev_priv->display = &ddi_display_funcs;
9499 	} else if (HAS_PCH_SPLIT(dev_priv)) {
9500 		dev_priv->display = &pch_split_display_funcs;
9501 	} else if (IS_CHERRYVIEW(dev_priv) ||
9502 		   IS_VALLEYVIEW(dev_priv)) {
9503 		dev_priv->display = &vlv_display_funcs;
9504 	} else {
9505 		dev_priv->display = &i9xx_display_funcs;
9506 	}
9507 
9508 	intel_fdi_init_hook(dev_priv);
9509 }
9510 
/*
 * Read the current cdclk configuration from hardware and seed the
 * software cdclk state (logical == actual == hw) with it.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state;

	if (!HAS_DISPLAY(i915))
		return;

	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}
9524 
9525 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
9526 {
9527 	struct drm_plane *plane;
9528 	struct intel_crtc *crtc;
9529 
9530 	for_each_intel_crtc(state->dev, crtc) {
9531 		struct intel_crtc_state *crtc_state;
9532 
9533 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
9534 		if (IS_ERR(crtc_state))
9535 			return PTR_ERR(crtc_state);
9536 
9537 		if (crtc_state->hw.active) {
9538 			/*
9539 			 * Preserve the inherited flag to avoid
9540 			 * taking the full modeset path.
9541 			 */
9542 			crtc_state->inherited = true;
9543 		}
9544 	}
9545 
9546 	drm_for_each_plane(plane, state->dev) {
9547 		struct drm_plane_state *plane_state;
9548 
9549 		plane_state = drm_atomic_get_plane_state(state, plane);
9550 		if (IS_ERR(plane_state))
9551 			return PTR_ERR(plane_state);
9552 	}
9553 
9554 	return 0;
9555 }
9556 
9557 /*
9558  * Calculate what we think the watermarks should be for the state we've read
9559  * out of the hardware and then immediately program those watermarks so that
9560  * we ensure the hardware settings match our internal state.
9561  *
9562  * We can calculate what we think WM's should be by creating a duplicate of the
9563  * current state (which was constructed during hardware readout) and running it
9564  * through the atomic check code to calculate new watermark values in the
9565  * state object.
9566  */
9567 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
9568 {
9569 	struct drm_atomic_state *state;
9570 	struct intel_atomic_state *intel_state;
9571 	struct intel_crtc *crtc;
9572 	struct intel_crtc_state *crtc_state;
9573 	struct drm_modeset_acquire_ctx ctx;
9574 	int ret;
9575 	int i;
9576 
9577 	/* Only supported on platforms that use atomic watermark design */
9578 	if (!dev_priv->wm_disp->optimize_watermarks)
9579 		return;
9580 
9581 	state = drm_atomic_state_alloc(&dev_priv->drm);
9582 	if (drm_WARN_ON(&dev_priv->drm, !state))
9583 		return;
9584 
9585 	intel_state = to_intel_atomic_state(state);
9586 
9587 	drm_modeset_acquire_init(&ctx, 0);
9588 
9589 retry:
9590 	state->acquire_ctx = &ctx;
9591 
9592 	/*
9593 	 * Hardware readout is the only time we don't want to calculate
9594 	 * intermediate watermarks (since we don't trust the current
9595 	 * watermarks).
9596 	 */
9597 	if (!HAS_GMCH(dev_priv))
9598 		intel_state->skip_intermediate_wm = true;
9599 
9600 	ret = sanitize_watermarks_add_affected(state);
9601 	if (ret)
9602 		goto fail;
9603 
9604 	ret = intel_atomic_check(&dev_priv->drm, state);
9605 	if (ret)
9606 		goto fail;
9607 
9608 	/* Write calculated watermark values back */
9609 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
9610 		crtc_state->wm.need_postvbl_update = true;
9611 		intel_optimize_watermarks(intel_state, crtc);
9612 
9613 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
9614 	}
9615 
9616 fail:
9617 	if (ret == -EDEADLK) {
9618 		drm_atomic_state_clear(state);
9619 		drm_modeset_backoff(&ctx);
9620 		goto retry;
9621 	}
9622 
9623 	/*
9624 	 * If we fail here, it means that the hardware appears to be
9625 	 * programmed in a way that shouldn't be possible, given our
9626 	 * understanding of watermark requirements.  This might mean a
9627 	 * mistake in the hardware readout code or a mistake in the
9628 	 * watermark calculations for a given platform.  Raise a WARN
9629 	 * so that this is noticeable.
9630 	 *
9631 	 * If this actually happens, we'll have to just leave the
9632 	 * BIOS-programmed watermarks untouched and hope for the best.
9633 	 */
9634 	drm_WARN(&dev_priv->drm, ret,
9635 		 "Could not determine valid watermarks for inherited state\n");
9636 
9637 	drm_atomic_state_put(state);
9638 
9639 	drm_modeset_drop_locks(&ctx);
9640 	drm_modeset_acquire_fini(&ctx);
9641 }
9642 
/*
 * Commit the state inherited from the BIOS so that all active planes
 * have fully computed state before the first real userspace commit.
 * Retries from scratch on modeset lock contention (-EDEADLK).
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			/*
			 * Pull in the connectors of any encoder whose
			 * fastset check says the inherited state can't be
			 * kept as-is, so the commit can fix it up.
			 */
			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Lock contention: drop everything and retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
9721 
/*
 * Initialize drm mode_config with i915's per-platform limits:
 * framebuffer and cursor dimensions, the mode_config vtable, and
 * async flip support.
 */
static void intel_mode_config_init(struct drm_i915_private *i915)
{
	struct drm_mode_config *mode_config = &i915->drm.mode_config;

	drm_mode_config_init(&i915->drm);
	INIT_LIST_HEAD(&i915->global_obj_list);

	mode_config->min_width = 0;
	mode_config->min_height = 0;

	mode_config->preferred_depth = 24;
	mode_config->prefer_shadow = 1;

	mode_config->funcs = &intel_mode_funcs;

	mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);

	/*
	 * Maximum framebuffer dimensions, chosen to match
	 * the maximum render engine surface size on gen4+.
	 */
	if (DISPLAY_VER(i915) >= 7) {
		mode_config->max_width = 16384;
		mode_config->max_height = 16384;
	} else if (DISPLAY_VER(i915) >= 4) {
		mode_config->max_width = 8192;
		mode_config->max_height = 8192;
	} else if (DISPLAY_VER(i915) == 3) {
		mode_config->max_width = 4096;
		mode_config->max_height = 4096;
	} else {
		mode_config->max_width = 2048;
		mode_config->max_height = 2048;
	}

	/* Cursor size limits vary per platform generation. */
	if (IS_I845G(i915) || IS_I865G(i915)) {
		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
		mode_config->cursor_height = 1023;
	} else if (IS_I830(i915) || IS_I85X(i915) ||
		   IS_I915G(i915) || IS_I915GM(i915)) {
		mode_config->cursor_width = 64;
		mode_config->cursor_height = 64;
	} else {
		mode_config->cursor_width = 256;
		mode_config->cursor_height = 256;
	}
}
9769 
/* Tear down what intel_mode_config_init() (and global obj registration) set up. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
9775 
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	/* Display-less devices are done after power domain setup. */
	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	/* Ordered wq for modesets, high-prio unbound wq for page flips. */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->framestart_delay = 1; /* 1-4 */

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	/* Global atomic state objects: cdclk, dbuf, bandwidth. */
	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
9846 
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* One crtc per hardware pipe. */
	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out and sanitize the BIOS-programmed hardware state. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	/* Reconstruct framebuffers for pipes left enabled by the BIOS. */
	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
9923 
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
9956 
/*
 * Force-enable @pipe on i830 with a fixed 640x480@60Hz timing (the
 * force-pipe quirk): program the DPLL dividers and pipe timings, spin
 * the PLL up, then enable the pipe and wait for the scanline to move.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check the divider values against the expected dotclock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* 640x480 timings, registers hold value-1. */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
10029 
/*
 * Disable a pipe that was force-enabled by the i830 quirk: verify all
 * planes and cursors are already off, disable the pipe, wait for the
 * scanline to stop, then shut down the DPLL.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* The pipe must not be disabled with planes or cursors still enabled. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
		    DISPLAY_PLANE_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
10059 
/*
 * On pre-gen4 hardware a primary plane may be attached to a pipe other
 * than the one its crtc owns (e.g. as left by the BIOS). Find any such
 * plane and disable it.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Gen4+ planes are fixed to their pipe; nothing to do. */
	if (DISPLAY_VER(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			    plane->base.base.id, plane->base.name);

		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
10088 
/* Return true if at least one encoder is currently attached to @crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Any iteration at all means an encoder is attached. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
10099 
/* Return the first connector attached to @encoder, or NULL if none. */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
10110 
10111 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
10112 			      enum pipe pch_transcoder)
10113 {
10114 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
10115 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
10116 }
10117 
/*
 * Reprogram the frame start delay (possibly left at a debug value by
 * the BIOS) to dev_priv->framestart_delay on the CPU transcoder and,
 * when a PCH encoder is in use, on the PCH transcoder as well. DSI
 * transcoders on HSW+/ver9+ are skipped entirely.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: delay lives in the per-transcoder chicken register. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms: delay lives in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT+: per-PCH-transcoder chicken register. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}
}
10168 
/*
 * Sanitize one crtc's state after hardware readout: clear BIOS frame
 * start delays, disable non-primary planes and BIOS color state, shut
 * the pipe down if no encoder drives it, and mark fifo underrun
 * reporting as disabled for bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/* Disable any background color/etc. set by the BIOS */
		intel_color_commit(crtc_state);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !crtc_state->bigjoiner_slave)
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
10230 
10231 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
10232 {
10233 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
10234 
10235 	/*
10236 	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
10237 	 * the hardware when a high res displays plugged in. DPLL P
10238 	 * divider is zero, and the pipe timings are bonkers. We'll
10239 	 * try to disable everything in that case.
10240 	 *
10241 	 * FIXME would be nice to be able to sanitize this state
10242 	 * without several WARNs, but for now let's take the easy
10243 	 * road.
10244 	 */
10245 	return IS_SANDYBRIDGE(dev_priv) &&
10246 		crtc_state->hw.active &&
10247 		crtc_state->shared_dpll &&
10248 		crtc_state->port_clock == 0;
10249 }
10250 
/*
 * Sanitize one encoder after hardware state readout: if the encoder
 * claims active connectors but no active pipe (e.g. fallout from the
 * resume register restore), manually run its disable hooks and clamp
 * the connector/encoder linkage to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a crtc with a misprogrammed DPLL as inactive so the
	 * manual-disable path below runs for it as well. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* restore the connector state we temporarily patched */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
10321 
10322 /* FIXME read out full plane state for all planes */
10323 static void readout_plane_state(struct drm_i915_private *dev_priv)
10324 {
10325 	struct intel_plane *plane;
10326 	struct intel_crtc *crtc;
10327 
10328 	for_each_intel_plane(&dev_priv->drm, plane) {
10329 		struct intel_plane_state *plane_state =
10330 			to_intel_plane_state(plane->base.state);
10331 		struct intel_crtc_state *crtc_state;
10332 		enum pipe pipe = PIPE_A;
10333 		bool visible;
10334 
10335 		visible = plane->get_hw_state(plane, &pipe);
10336 
10337 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10338 		crtc_state = to_intel_crtc_state(crtc->base.state);
10339 
10340 		intel_set_plane_visible(crtc_state, plane_state, visible);
10341 
10342 		drm_dbg_kms(&dev_priv->drm,
10343 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
10344 			    plane->base.base.id, plane->base.name,
10345 			    enableddisabled(visible), pipe_name(pipe));
10346 	}
10347 
10348 	for_each_intel_crtc(&dev_priv->drm, crtc) {
10349 		struct intel_crtc_state *crtc_state =
10350 			to_intel_crtc_state(crtc->base.state);
10351 
10352 		fixup_plane_bitmasks(crtc_state);
10353 	}
10354 }
10355 
/*
 * Read the current hardware modeset state into the software state
 * structures: crtc, plane, encoder, connector and dpll states, plus
 * the derived cdclk/dbuf/bandwidth bookkeeping. No hardware state is
 * changed here; sanitizing happens afterwards.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	/* Reset each crtc's state and read the pipe config from hardware. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	/* Read out each encoder's state and link it to its crtc. */
	for_each_intel_encoder(dev, encoder) {
		struct intel_crtc_state *crtc_state = NULL;

		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* encoder should read be linked to bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		/* crtc_state may be NULL here for an inactive encoder */
		if (encoder->sync_state)
			encoder->sync_state(encoder, crtc_state);

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	/* Read out connector state and populate the crtc uapi masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Finally, derive cdclk/voltage/bandwidth state per crtc. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
10540 
10541 static void
10542 get_encoder_power_domains(struct drm_i915_private *dev_priv)
10543 {
10544 	struct intel_encoder *encoder;
10545 
10546 	for_each_intel_encoder(&dev_priv->drm, encoder) {
10547 		struct intel_crtc_state *crtc_state;
10548 
10549 		if (!encoder->get_power_domains)
10550 			continue;
10551 
10552 		/*
10553 		 * MST-primary and inactive encoders don't have a crtc state
10554 		 * and neither of these require any power domain references.
10555 		 */
10556 		if (!encoder->base.crtc)
10557 			continue;
10558 
10559 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
10560 		encoder->get_power_domains(encoder, crtc_state);
10561 	}
10562 }
10563 
/*
 * Apply display workarounds that need to be in place before the
 * initial modeset state takeover starts touching planes/pipes.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
10592 
10593 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
10594 				       enum port port, i915_reg_t hdmi_reg)
10595 {
10596 	u32 val = intel_de_read(dev_priv, hdmi_reg);
10597 
10598 	if (val & SDVO_ENABLE ||
10599 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
10600 		return;
10601 
10602 	drm_dbg_kms(&dev_priv->drm,
10603 		    "Sanitizing transcoder select for HDMI %c\n",
10604 		    port_name(port));
10605 
10606 	val &= ~SDVO_PIPE_SEL_MASK;
10607 	val |= SDVO_PIPE_SEL(PIPE_A);
10608 
10609 	intel_de_write(dev_priv, hdmi_reg, val);
10610 }
10611 
10612 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
10613 				     enum port port, i915_reg_t dp_reg)
10614 {
10615 	u32 val = intel_de_read(dev_priv, dp_reg);
10616 
10617 	if (val & DP_PORT_EN ||
10618 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
10619 		return;
10620 
10621 	drm_dbg_kms(&dev_priv->drm,
10622 		    "Sanitizing transcoder select for DP %c\n",
10623 		    port_name(port));
10624 
10625 	val &= ~DP_PIPE_SEL_MASK;
10626 	val |= DP_PIPE_SEL(PIPE_A);
10627 
10628 	intel_de_write(dev_priv, dp_reg, val);
10629 }
10630 
/* Sanitize the transcoder select bits on all IBX PCH DP/HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
10653 
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state: read out pipe/plane/encoder/
 * connector state, then fix up or disable anything the BIOS (or a
 * previous driver instance) left in an unwanted state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	/* Hold display power for the whole readout+sanitize sequence. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Read out (and sanitize where supported) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* No crtc should still be holding power domain references here. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
10732 
10733 void intel_display_resume(struct drm_device *dev)
10734 {
10735 	struct drm_i915_private *dev_priv = to_i915(dev);
10736 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
10737 	struct drm_modeset_acquire_ctx ctx;
10738 	int ret;
10739 
10740 	if (!HAS_DISPLAY(dev_priv))
10741 		return;
10742 
10743 	dev_priv->modeset_restore_state = NULL;
10744 	if (state)
10745 		state->acquire_ctx = &ctx;
10746 
10747 	drm_modeset_acquire_init(&ctx, 0);
10748 
10749 	while (1) {
10750 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
10751 		if (ret != -EDEADLK)
10752 			break;
10753 
10754 		drm_modeset_backoff(&ctx);
10755 	}
10756 
10757 	if (!ret)
10758 		ret = __intel_display_resume(dev, state, &ctx);
10759 
10760 	intel_enable_ipc(dev_priv);
10761 	drm_modeset_drop_locks(&ctx);
10762 	drm_modeset_acquire_fini(&ctx);
10763 
10764 	if (ret)
10765 		drm_err(&dev_priv->drm,
10766 			"Restoring old state failed with %i\n", ret);
10767 	if (state)
10768 		drm_atomic_state_put(state);
10769 }
10770 
/*
 * Cancel any per-connector work (modeset retries, HDCP check/prop
 * work) that hotplug processing may have queued.
 */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* the retry work is only initialized for some connectors */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		/* HDCP work only exists when a shim is registered */
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
10788 
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Let any pending flip/modeset work drain first. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* All queued atomic-state frees must have completed by now. */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
10801 
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* all work on these queues must have been flushed/cancelled above */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup(i915);
}
10844 
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	/* VBT data is no longer needed once the display is torn down */
	intel_bios_driver_remove(i915);
}
10856 
/*
 * Register the display side of the driver with userspace-facing
 * interfaces (debugfs, opregion/ACPI, audio, fbdev, polling). Ordering
 * matters; see the comments below.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
10887 
/*
 * Unregister the display side of the driver, roughly the reverse of
 * intel_display_driver_register().
 */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}
10907